repo
stringlengths 1
152
⌀ | file
stringlengths 14
221
| code
stringlengths 501
25k
| file_length
int64 501
25k
| avg_line_length
float64 20
99.5
| max_line_length
int64 21
134
| extension_type
stringclasses 2
values |
|---|---|---|---|---|---|---|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-3x8c8-minmax-rndnu-neon-mull.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c8-neon-mull.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
// QS8 GEMM microkernel: computes a 3x8 (MRxNR) tile of C = A*B for signed
// 8-bit inputs using NEON VMULL.S8, with "rndnu" requantization (saturating
// pre-shift, doubling-high multiply, rounding post-shift).
//
// Weight layout (packed by XNNPACK): per N-block of 8 columns, 8 x int32 bias
// values followed by K/8 groups of 8 x 8 int8 weights (one int8x8 vector per
// output column per K-group).
//
// Fix vs. previous revision: the six `params` loads in the requantization and
// clamping stages had been corrupted by HTML-entity mangling (`&params` had
// become `¶ms`, i.e. `&para` was rendered as the pilcrow character), which
// made the function uncompilable. Restored to `&params->rndnu_neon....`.
void xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c8__neon_mull(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // The packed weights cover K in whole groups of 8 bytes; A rows are padded
  // accordingly, so rounding kc up keeps A and W pointers in lockstep.
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    // Fewer than 2 rows: alias row 1 onto row 0 so its results are discarded.
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    // Fewer than 3 rows: alias row 2 onto row 1.
    a2 = a1;
    c2 = c1;
  }

  do {
    // Initialize per-column accumulators from the packed bias. Each int32x4
    // accumulator holds 4 partial lane sums for ONE output column; lanes are
    // reduced to a scalar after the K loop.
    int32x4_t vacc0x0 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x1 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x2 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x3 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x4 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x5 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x6 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x7 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc1x0 = vacc0x0;
    int32x4_t vacc1x1 = vacc0x1;
    int32x4_t vacc1x2 = vacc0x2;
    int32x4_t vacc1x3 = vacc0x3;
    int32x4_t vacc1x4 = vacc0x4;
    int32x4_t vacc1x5 = vacc0x5;
    int32x4_t vacc1x6 = vacc0x6;
    int32x4_t vacc1x7 = vacc0x7;
    int32x4_t vacc2x0 = vacc0x0;
    int32x4_t vacc2x1 = vacc0x1;
    int32x4_t vacc2x2 = vacc0x2;
    int32x4_t vacc2x3 = vacc0x3;
    int32x4_t vacc2x4 = vacc0x4;
    int32x4_t vacc2x5 = vacc0x5;
    int32x4_t vacc2x6 = vacc0x6;
    int32x4_t vacc2x7 = vacc0x7;

    size_t k = kc;
    // Handle 8 bytes at a time using MUL: widen int8x8 products to int16x8
    // (VMULL.S8), then pairwise-accumulate into int32 lanes (VPADAL.S16).
    while (k != 0) {
      const int8x8_t va0 = vld1_s8(a0); a0 += 8;
      const int8x8_t va1 = vld1_s8(a1); a1 += 8;
      const int8x8_t va2 = vld1_s8(a2); a2 += 8;

      const int8x8_t vb0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int16x8_t vprod0x0 = vmull_s8(vb0, va0);
      const int16x8_t vprod1x0 = vmull_s8(vb0, va1);
      const int16x8_t vprod2x0 = vmull_s8(vb0, va2);
      vacc0x0 = vpadalq_s16(vacc0x0, vprod0x0);
      vacc1x0 = vpadalq_s16(vacc1x0, vprod1x0);
      vacc2x0 = vpadalq_s16(vacc2x0, vprod2x0);
      const int8x8_t vb1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int16x8_t vprod0x1 = vmull_s8(vb1, va0);
      const int16x8_t vprod1x1 = vmull_s8(vb1, va1);
      const int16x8_t vprod2x1 = vmull_s8(vb1, va2);
      vacc0x1 = vpadalq_s16(vacc0x1, vprod0x1);
      vacc1x1 = vpadalq_s16(vacc1x1, vprod1x1);
      vacc2x1 = vpadalq_s16(vacc2x1, vprod2x1);
      const int8x8_t vb2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int16x8_t vprod0x2 = vmull_s8(vb2, va0);
      const int16x8_t vprod1x2 = vmull_s8(vb2, va1);
      const int16x8_t vprod2x2 = vmull_s8(vb2, va2);
      vacc0x2 = vpadalq_s16(vacc0x2, vprod0x2);
      vacc1x2 = vpadalq_s16(vacc1x2, vprod1x2);
      vacc2x2 = vpadalq_s16(vacc2x2, vprod2x2);
      const int8x8_t vb3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int16x8_t vprod0x3 = vmull_s8(vb3, va0);
      const int16x8_t vprod1x3 = vmull_s8(vb3, va1);
      const int16x8_t vprod2x3 = vmull_s8(vb3, va2);
      vacc0x3 = vpadalq_s16(vacc0x3, vprod0x3);
      vacc1x3 = vpadalq_s16(vacc1x3, vprod1x3);
      vacc2x3 = vpadalq_s16(vacc2x3, vprod2x3);
      const int8x8_t vb4 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int16x8_t vprod0x4 = vmull_s8(vb4, va0);
      const int16x8_t vprod1x4 = vmull_s8(vb4, va1);
      const int16x8_t vprod2x4 = vmull_s8(vb4, va2);
      vacc0x4 = vpadalq_s16(vacc0x4, vprod0x4);
      vacc1x4 = vpadalq_s16(vacc1x4, vprod1x4);
      vacc2x4 = vpadalq_s16(vacc2x4, vprod2x4);
      const int8x8_t vb5 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int16x8_t vprod0x5 = vmull_s8(vb5, va0);
      const int16x8_t vprod1x5 = vmull_s8(vb5, va1);
      const int16x8_t vprod2x5 = vmull_s8(vb5, va2);
      vacc0x5 = vpadalq_s16(vacc0x5, vprod0x5);
      vacc1x5 = vpadalq_s16(vacc1x5, vprod1x5);
      vacc2x5 = vpadalq_s16(vacc2x5, vprod2x5);
      const int8x8_t vb6 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int16x8_t vprod0x6 = vmull_s8(vb6, va0);
      const int16x8_t vprod1x6 = vmull_s8(vb6, va1);
      const int16x8_t vprod2x6 = vmull_s8(vb6, va2);
      vacc0x6 = vpadalq_s16(vacc0x6, vprod0x6);
      vacc1x6 = vpadalq_s16(vacc1x6, vprod1x6);
      vacc2x6 = vpadalq_s16(vacc2x6, vprod2x6);
      const int8x8_t vb7 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int16x8_t vprod0x7 = vmull_s8(vb7, va0);
      const int16x8_t vprod1x7 = vmull_s8(vb7, va1);
      const int16x8_t vprod2x7 = vmull_s8(vb7, va2);
      vacc0x7 = vpadalq_s16(vacc0x7, vprod0x7);
      vacc1x7 = vpadalq_s16(vacc1x7, vprod1x7);
      vacc2x7 = vpadalq_s16(vacc2x7, vprod2x7);

      k -= 8 * sizeof(int8_t);
    }

    // Reduce the 4 partial lane sums of each column accumulator into one
    // scalar per column, gathering columns 0..3 and 4..7 into int32x4 vectors.
#if XNN_ARCH_ARM64
    // AArch64 has a full-width pairwise add (ADDP), so the reduction takes
    // two vpaddq levels per group of 4 columns.
    const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1);
    const int32x4_t vsum0x23 = vpaddq_s32(vacc0x2, vacc0x3);
    const int32x4_t vsum0x45 = vpaddq_s32(vacc0x4, vacc0x5);
    const int32x4_t vsum0x67 = vpaddq_s32(vacc0x6, vacc0x7);
    const int32x4_t vsum1x01 = vpaddq_s32(vacc1x0, vacc1x1);
    const int32x4_t vsum1x23 = vpaddq_s32(vacc1x2, vacc1x3);
    const int32x4_t vsum1x45 = vpaddq_s32(vacc1x4, vacc1x5);
    const int32x4_t vsum1x67 = vpaddq_s32(vacc1x6, vacc1x7);
    const int32x4_t vsum2x01 = vpaddq_s32(vacc2x0, vacc2x1);
    const int32x4_t vsum2x23 = vpaddq_s32(vacc2x2, vacc2x3);
    const int32x4_t vsum2x45 = vpaddq_s32(vacc2x4, vacc2x5);
    const int32x4_t vsum2x67 = vpaddq_s32(vacc2x6, vacc2x7);
    int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23);
    int32x4_t vacc0x4567 = vpaddq_s32(vsum0x45, vsum0x67);
    int32x4_t vacc1x0123 = vpaddq_s32(vsum1x01, vsum1x23);
    int32x4_t vacc1x4567 = vpaddq_s32(vsum1x45, vsum1x67);
    int32x4_t vacc2x0123 = vpaddq_s32(vsum2x01, vsum2x23);
    int32x4_t vacc2x4567 = vpaddq_s32(vsum2x45, vsum2x67);
#else
    // AArch32 lacks a 128-bit pairwise add: add high/low halves, then use the
    // 64-bit vpadd to pair columns, then recombine.
    const int32x2_t vpsum0x0 = vadd_s32(vget_low_s32(vacc0x0), vget_high_s32(vacc0x0));
    const int32x2_t vpsum0x1 = vadd_s32(vget_low_s32(vacc0x1), vget_high_s32(vacc0x1));
    const int32x2_t vpsum0x2 = vadd_s32(vget_low_s32(vacc0x2), vget_high_s32(vacc0x2));
    const int32x2_t vpsum0x3 = vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3));
    const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1);
    const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3);
    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23 );
    const int32x2_t vpsum0x4 = vadd_s32(vget_low_s32(vacc0x4), vget_high_s32(vacc0x4));
    const int32x2_t vpsum0x5 = vadd_s32(vget_low_s32(vacc0x5), vget_high_s32(vacc0x5));
    const int32x2_t vpsum0x6 = vadd_s32(vget_low_s32(vacc0x6), vget_high_s32(vacc0x6));
    const int32x2_t vpsum0x7 = vadd_s32(vget_low_s32(vacc0x7), vget_high_s32(vacc0x7));
    const int32x2_t vsum0x45 = vpadd_s32(vpsum0x4, vpsum0x5);
    const int32x2_t vsum0x67 = vpadd_s32(vpsum0x6, vpsum0x7);
    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67 );
    const int32x2_t vpsum1x0 = vadd_s32(vget_low_s32(vacc1x0), vget_high_s32(vacc1x0));
    const int32x2_t vpsum1x1 = vadd_s32(vget_low_s32(vacc1x1), vget_high_s32(vacc1x1));
    const int32x2_t vpsum1x2 = vadd_s32(vget_low_s32(vacc1x2), vget_high_s32(vacc1x2));
    const int32x2_t vpsum1x3 = vadd_s32(vget_low_s32(vacc1x3), vget_high_s32(vacc1x3));
    const int32x2_t vsum1x01 = vpadd_s32(vpsum1x0, vpsum1x1);
    const int32x2_t vsum1x23 = vpadd_s32(vpsum1x2, vpsum1x3);
    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23 );
    const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));
    const int32x2_t vpsum1x5 = vadd_s32(vget_low_s32(vacc1x5), vget_high_s32(vacc1x5));
    const int32x2_t vpsum1x6 = vadd_s32(vget_low_s32(vacc1x6), vget_high_s32(vacc1x6));
    const int32x2_t vpsum1x7 = vadd_s32(vget_low_s32(vacc1x7), vget_high_s32(vacc1x7));
    const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);
    const int32x2_t vsum1x67 = vpadd_s32(vpsum1x6, vpsum1x7);
    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67 );
    const int32x2_t vpsum2x0 = vadd_s32(vget_low_s32(vacc2x0), vget_high_s32(vacc2x0));
    const int32x2_t vpsum2x1 = vadd_s32(vget_low_s32(vacc2x1), vget_high_s32(vacc2x1));
    const int32x2_t vpsum2x2 = vadd_s32(vget_low_s32(vacc2x2), vget_high_s32(vacc2x2));
    const int32x2_t vpsum2x3 = vadd_s32(vget_low_s32(vacc2x3), vget_high_s32(vacc2x3));
    const int32x2_t vsum2x01 = vpadd_s32(vpsum2x0, vpsum2x1);
    const int32x2_t vsum2x23 = vpadd_s32(vpsum2x2, vpsum2x3);
    int32x4_t vacc2x0123 = vcombine_s32(vsum2x01, vsum2x23 );
    const int32x2_t vpsum2x4 = vadd_s32(vget_low_s32(vacc2x4), vget_high_s32(vacc2x4));
    const int32x2_t vpsum2x5 = vadd_s32(vget_low_s32(vacc2x5), vget_high_s32(vacc2x5));
    const int32x2_t vpsum2x6 = vadd_s32(vget_low_s32(vacc2x6), vget_high_s32(vacc2x6));
    const int32x2_t vpsum2x7 = vadd_s32(vget_low_s32(vacc2x7), vget_high_s32(vacc2x7));
    const int32x2_t vsum2x45 = vpadd_s32(vpsum2x4, vpsum2x5);
    const int32x2_t vsum2x67 = vpadd_s32(vpsum2x6, vpsum2x7);
    int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67 );
#endif

    // rndnu requantization: acc = rshift_round((sat_shl(acc, pre) *2h mult), post)
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
    vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);

    // Narrow int32 -> int16 with saturation, add the output zero point, then
    // narrow int16 -> int8 with saturation. Rows 0 and 1 are packed into one
    // int8x16; row 2 stays in an int8x8.
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
    int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
    int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567);

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
#else
    int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
    int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
    int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567));

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
#endif

    // Clamp to [output_min, output_max].
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
    vout2x01234567 = vmax_s8(vout2x01234567, vget_low_s8(voutput_min));

    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
    vout2x01234567 = vmin_s8(vout2x01234567, vget_low_s8(voutput_max));

    if (nc >= 8) {
      // Full 8-column store; rewind A pointers for the next N-block.
      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
      vst1_s8(c2 + 0, vout2x01234567);

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);

      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);

      nc -= 8;
    } else {
      // Final case where not all of the 8 columns fit in the destination:
      // store 4, 2, then 1 lanes, rotating the vectors between stores.
      if (nc & 4) {
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1_lane_u32((void*) c2, vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4;
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4);
      }
      if (nc & 2) {
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1_lane_u16((void*) c2, vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2;
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2);
      }
      if (nc & 1) {
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1_lane_s8(c2, vout2x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
| 15,592
| 48.977564
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-3x8c8-xw-minmax-fp32-avx2.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx8c8-avx2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QS8 GEMM microkernel: computes a 3x8 (MRxNR) tile of C = A*B for signed
// 8-bit inputs using AVX2, with fp32 requantization (scale in float, round to
// nearest, saturate). "xw" variant: weights are pre-widened to int16 so the
// inner loop can use _mm256_madd_epi16 directly without sign-extending B.
//
// Packed weight layout per N-block: 8 x int32 bias, then K/8 groups of
// 4 x 32 int16 weights (two columns interleaved per 256-bit vector).
void xnn_qs8_gemm_xw_minmax_fp32_ukernel_3x8c8__avx2(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
// Packed weights cover K in whole groups of 8 bytes; A rows are padded to
// match, so rounding kc up keeps the A and W pointers in lockstep.
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
// Fewer than 2 rows: alias row 1 onto row 0 so its results are discarded.
a1 = a0;
c1 = c0;
}
const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
// Fewer than 3 rows: alias row 2 onto row 1.
a2 = a1;
c2 = c1;
}
do {
// Load the 8 bias values; each 256-bit accumulator holds partial sums for
// TWO output columns (one per 128-bit lane), e.g. vacc0x01 = columns 0,1.
const __m128i vbias0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
const __m128i vbias0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
__m256i vacc0x01 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x0), vbias0x1, 1);
const __m128i vbias0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
const __m128i vbias0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
__m256i vacc0x23 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x2), vbias0x3, 1);
const __m128i vbias0x4 = _mm_cvtsi32_si128(((const int*) w)[4]);
const __m128i vbias0x5 = _mm_cvtsi32_si128(((const int*) w)[5]);
__m256i vacc0x45 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x4), vbias0x5, 1);
const __m128i vbias0x6 = _mm_cvtsi32_si128(((const int*) w)[6]);
const __m128i vbias0x7 = _mm_cvtsi32_si128(((const int*) w)[7]);
__m256i vacc0x67 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x6), vbias0x7, 1);
__m256i vacc1x01 = vacc0x01;
__m256i vacc1x23 = vacc0x23;
__m256i vacc1x45 = vacc0x45;
__m256i vacc1x67 = vacc0x67;
__m256i vacc2x01 = vacc0x01;
__m256i vacc2x23 = vacc0x23;
__m256i vacc2x45 = vacc0x45;
__m256i vacc2x67 = vacc0x67;
w = (const int32_t*) w + 8;
size_t k = 0;
// Main K loop: 8 bytes of A per row per iteration. A bytes are broadcast
// to both lanes and sign-extended to int16; B is already int16 ("xw").
while (k < kc) {
const __m128i va0 = _mm_broadcastq_epi64(_mm_loadl_epi64((const __m128i*) a0));
const __m256i vxa0 = _mm256_cvtepi8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_broadcastq_epi64(_mm_loadl_epi64((const __m128i*) a1));
const __m256i vxa1 = _mm256_cvtepi8_epi16(va1);
a1 += 8;
const __m128i va2 = _mm_broadcastq_epi64(_mm_loadl_epi64((const __m128i*) a2));
const __m256i vxa2 = _mm256_cvtepi8_epi16(va2);
a2 += 8;
// madd multiplies 16 int16 pairs and horizontally adds adjacent pairs,
// yielding 8 int32 partial sums per accumulator (4 per column lane).
const __m256i vxb01 = _mm256_load_si256((const __m256i*) w);
vacc0x01 = _mm256_add_epi32(vacc0x01, _mm256_madd_epi16(vxa0, vxb01));
vacc1x01 = _mm256_add_epi32(vacc1x01, _mm256_madd_epi16(vxa1, vxb01));
vacc2x01 = _mm256_add_epi32(vacc2x01, _mm256_madd_epi16(vxa2, vxb01));
const __m256i vxb23 = _mm256_load_si256((const __m256i*) ((const int16_t*) w + 16));
vacc0x23 = _mm256_add_epi32(vacc0x23, _mm256_madd_epi16(vxa0, vxb23));
vacc1x23 = _mm256_add_epi32(vacc1x23, _mm256_madd_epi16(vxa1, vxb23));
vacc2x23 = _mm256_add_epi32(vacc2x23, _mm256_madd_epi16(vxa2, vxb23));
const __m256i vxb45 = _mm256_load_si256((const __m256i*) ((const int16_t*) w + 32));
vacc0x45 = _mm256_add_epi32(vacc0x45, _mm256_madd_epi16(vxa0, vxb45));
vacc1x45 = _mm256_add_epi32(vacc1x45, _mm256_madd_epi16(vxa1, vxb45));
vacc2x45 = _mm256_add_epi32(vacc2x45, _mm256_madd_epi16(vxa2, vxb45));
const __m256i vxb67 = _mm256_load_si256((const __m256i*) ((const int16_t*) w + 48));
vacc0x67 = _mm256_add_epi32(vacc0x67, _mm256_madd_epi16(vxa0, vxb67));
vacc1x67 = _mm256_add_epi32(vacc1x67, _mm256_madd_epi16(vxa1, vxb67));
vacc2x67 = _mm256_add_epi32(vacc2x67, _mm256_madd_epi16(vxa2, vxb67));
w = (const void*) ((const int16_t*) w + 64);
k += 8 * sizeof(int8_t);
}
// Horizontally reduce the per-column partial sums. The hadd sequence leaves
// columns in 02461357 order; permute restores 01234567.
const __m256i vacc0x0213 = _mm256_hadd_epi32(vacc0x01, vacc0x23);
const __m256i vacc0x4657 = _mm256_hadd_epi32(vacc0x45, vacc0x67);
const __m256i vacc1x0213 = _mm256_hadd_epi32(vacc1x01, vacc1x23);
const __m256i vacc1x4657 = _mm256_hadd_epi32(vacc1x45, vacc1x67);
const __m256i vacc2x0213 = _mm256_hadd_epi32(vacc2x01, vacc2x23);
const __m256i vacc2x4657 = _mm256_hadd_epi32(vacc2x45, vacc2x67);
const __m256i vacc0x02461357 = _mm256_hadd_epi32(vacc0x0213, vacc0x4657);
const __m256i vacc1x02461357 = _mm256_hadd_epi32(vacc1x0213, vacc1x4657);
const __m256i vacc2x02461357 = _mm256_hadd_epi32(vacc2x0213, vacc2x4657);
const __m256i vpermute_mask = _mm256_set_epi32(7, 3, 6, 2, 5, 1, 4, 0);
__m256i vacc0x01234567 = _mm256_permutevar8x32_epi32(vacc0x02461357, vpermute_mask);
__m256i vacc1x01234567 = _mm256_permutevar8x32_epi32(vacc1x02461357, vpermute_mask);
__m256i vacc2x01234567 = _mm256_permutevar8x32_epi32(vacc2x02461357, vpermute_mask);
// fp32 requantization: convert to float, multiply by scale, clamp above
// (min clamp is applied later on the packed int8), convert back with
// round-to-nearest-even.
__m256 vscaled0x01234567 = _mm256_cvtepi32_ps(vacc0x01234567);
__m256 vscaled1x01234567 = _mm256_cvtepi32_ps(vacc1x01234567);
__m256 vscaled2x01234567 = _mm256_cvtepi32_ps(vacc2x01234567);
const __m256 vscale = _mm256_load_ps(params->fp32_avx2.scale);
vscaled0x01234567 = _mm256_mul_ps(vscaled0x01234567, vscale);
vscaled1x01234567 = _mm256_mul_ps(vscaled1x01234567, vscale);
vscaled2x01234567 = _mm256_mul_ps(vscaled2x01234567, vscale);
const __m256 voutput_max_less_zero_point = _mm256_load_ps(params->fp32_avx2.output_max_less_zero_point);
vscaled0x01234567 = _mm256_min_ps(vscaled0x01234567, voutput_max_less_zero_point);
vscaled1x01234567 = _mm256_min_ps(vscaled1x01234567, voutput_max_less_zero_point);
vscaled2x01234567 = _mm256_min_ps(vscaled2x01234567, voutput_max_less_zero_point);
vacc0x01234567 = _mm256_cvtps_epi32(vscaled0x01234567);
vacc1x01234567 = _mm256_cvtps_epi32(vscaled1x01234567);
vacc2x01234567 = _mm256_cvtps_epi32(vscaled2x01234567);
// Pack int32 -> int16 with saturation, add zero point, then pack to int8.
// packs interleaves 128-bit lanes, so permute4x64 re-linearizes the rows;
// row 2 is duplicated into both halves of its own pack.
const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->fp32_avx2.output_zero_point);
__m256i vacc01x01234567 = _mm256_adds_epi16(_mm256_packs_epi32(vacc0x01234567, vacc1x01234567), voutput_zero_point);
__m256i vacc22x01234567 = _mm256_adds_epi16(_mm256_packs_epi32(vacc2x01234567, vacc2x01234567), voutput_zero_point);
vacc01x01234567 = _mm256_permute4x64_epi64(vacc01x01234567, _MM_SHUFFLE(3, 1, 2, 0));
vacc22x01234567 = _mm256_permute4x64_epi64(vacc22x01234567, _MM_SHUFFLE(3, 1, 2, 0));
__m256i vout = _mm256_packs_epi16(vacc01x01234567, vacc22x01234567);
vout = _mm256_max_epi8(vout, _mm256_load_si256((const __m256i*) params->fp32_avx2.output_min));
// vout layout after packing: lo lane = [row0 | row2], hi lane = [row1 | row2].
__m128i vout_lo = _mm256_castsi256_si128(vout);
__m128i vout_hi = _mm256_extracti128_si256(vout, 1);
if (nc >= 8) {
// Full 8-column store; rewind the A pointers for the next N-block.
_mm_storel_epi64((__m128i*) c0, vout_lo);
_mm_storel_epi64((__m128i*) c1, vout_hi);
_mm_storeh_pi((__m64*) c2, _mm_castsi128_ps(vout_lo));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
a2 = (const int8_t*) ((uintptr_t) a2 - kc);
nc -= 8;
} else {
// Tail: store 4, 2, then 1 columns, shifting the vectors between stores.
if (nc & 4) {
_mm_storeu_si32(c0, vout_lo);
_mm_storeu_si32(c1, vout_hi);
unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout_lo, 2));
c0 += 4;
c1 += 4;
c2 += 4;
vout_lo = _mm_srli_epi64(vout_lo, 32);
vout_hi = _mm_srli_epi64(vout_hi, 32);
}
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout_lo, 0));
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout_hi, 0));
unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout_lo, 4));
c0 += 2;
c1 += 2;
c2 += 2;
vout_lo = _mm_srli_epi32(vout_lo, 16);
vout_hi = _mm_srli_epi32(vout_hi, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout_lo, 0);
*c1 = (int8_t) _mm_extract_epi8(vout_hi, 0);
*c2 = (int8_t) _mm_extract_epi8(vout_lo, 8);
}
nc = 0;
}
} while (nc != 0);
}
| 8,832
| 40.275701
| 120
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x16c2s4-minmax-rndnu-neon-mull.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c2-neon-mull-shuffle.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2s4__neon_mull(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const int8_t* a0 = a;
int8_t* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
kc = round_up_po2(kc, 8 * sizeof(int8_t));
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc0x89AB = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc1x0123 = vacc0x0123;
int32x4_t vacc1x4567 = vacc0x4567;
int32x4_t vacc1x89AB = vacc0x89AB;
int32x4_t vacc1xCDEF = vacc0xCDEF;
int32x4_t vacc2x0123 = vacc0x0123;
int32x4_t vacc2x4567 = vacc0x4567;
int32x4_t vacc2x89AB = vacc0x89AB;
int32x4_t vacc2xCDEF = vacc0xCDEF;
int32x4_t vacc3x0123 = vacc0x0123;
int32x4_t vacc3x4567 = vacc0x4567;
int32x4_t vacc3x89AB = vacc0x89AB;
int32x4_t vacc3xCDEF = vacc0xCDEF;
size_t k = kc;
do {
int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
int8x8_t va2x0 = vld1_s8(a2); a2 += 8;
int8x8_t va3x0 = vld1_s8(a3); a3 += 8;
const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb89ABc0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vbCDEFc0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb89ABc1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vbCDEFc1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb89ABc2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vbCDEFc2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb89ABc3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vbCDEFc3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0x0);
int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1x0);
int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2x0);
int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0x0, va3x0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0x0);
int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1x0);
int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0x0, va2x0);
int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0x0, va3x0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0x0, va0x0);
int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0x0, va1x0);
int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0x0, va2x0);
int16x8_t vprod3x89ABc0 = vmull_s8(vb89ABc0x0, va3x0);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc0);
int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0x0, va0x0);
int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0x0, va1x0);
int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0x0, va2x0);
int16x8_t vprod3xCDEFc0 = vmull_s8(vbCDEFc0x0, va3x0);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc0);
va0x0 = vext_s8(va0x0, va0x0, 2);
va1x0 = vext_s8(va1x0, va1x0, 2);
va2x0 = vext_s8(va2x0, va2x0, 2);
va3x0 = vext_s8(va3x0, va3x0, 2);
int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0x0);
int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1x0);
int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2x0);
int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1x0, va3x0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0x0);
int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1x0);
int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1x0, va2x0);
int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1x0, va3x0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1x0, va0x0);
int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1x0, va1x0);
int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1x0, va2x0);
int16x8_t vprod3x89ABc1 = vmull_s8(vb89ABc1x0, va3x0);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc1);
int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1x0, va0x0);
int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1x0, va1x0);
int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1x0, va2x0);
int16x8_t vprod3xCDEFc1 = vmull_s8(vbCDEFc1x0, va3x0);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc1);
va0x0 = vext_s8(va0x0, va0x0, 2);
va1x0 = vext_s8(va1x0, va1x0, 2);
va2x0 = vext_s8(va2x0, va2x0, 2);
va3x0 = vext_s8(va3x0, va3x0, 2);
int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0x0);
int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1x0);
int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2x0);
int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2x0, va3x0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0x0);
int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1x0);
int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2x0, va2x0);
int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2x0, va3x0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2x0, va0x0);
int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2x0, va1x0);
int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2x0, va2x0);
int16x8_t vprod3x89ABc2 = vmull_s8(vb89ABc2x0, va3x0);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc2);
int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2x0, va0x0);
int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2x0, va1x0);
int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2x0, va2x0);
int16x8_t vprod3xCDEFc2 = vmull_s8(vbCDEFc2x0, va3x0);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc2);
va0x0 = vext_s8(va0x0, va0x0, 2);
va1x0 = vext_s8(va1x0, va1x0, 2);
va2x0 = vext_s8(va2x0, va2x0, 2);
va3x0 = vext_s8(va3x0, va3x0, 2);
int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0x0);
int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1x0);
int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2x0);
int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3x0, va3x0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3);
int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0x0);
int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1x0);
int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3x0, va2x0);
int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3x0, va3x0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3);
int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3x0, va0x0);
int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3x0, va1x0);
int16x8_t vprod2x89ABc3 = vmull_s8(vb89ABc3x0, va2x0);
int16x8_t vprod3x89ABc3 = vmull_s8(vb89ABc3x0, va3x0);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc3);
vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc3);
int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3x0, va0x0);
int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3x0, va1x0);
int16x8_t vprod2xCDEFc3 = vmull_s8(vbCDEFc3x0, va2x0);
int16x8_t vprod3xCDEFc3 = vmull_s8(vbCDEFc3x0, va3x0);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc3);
vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc3);
k -= 8 * sizeof(int8_t);
} while (k != 0);
const int32x4_t vright_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);
vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
vacc1x89AB = vqshlq_s32(vacc1x89AB, vright_pre_shift);
vacc1xCDEF = vqshlq_s32(vacc1xCDEF, vright_pre_shift);
vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);
vacc2x89AB = vqshlq_s32(vacc2x89AB, vright_pre_shift);
vacc2xCDEF = vqshlq_s32(vacc2xCDEF, vright_pre_shift);
vacc3x0123 = vqshlq_s32(vacc3x0123, vright_pre_shift);
vacc3x4567 = vqshlq_s32(vacc3x4567, vright_pre_shift);
vacc3x89AB = vqshlq_s32(vacc3x89AB, vright_pre_shift);
vacc3xCDEF = vqshlq_s32(vacc3xCDEF, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier);
vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier);
vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
vacc3x89AB = vqdmulhq_s32(vacc3x89AB, vmultiplier);
vacc3xCDEF = vqdmulhq_s32(vacc3xCDEF, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift);
vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift);
vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
vacc3x89AB = vrshlq_s32(vacc3x89AB, vright_post_shift);
vacc3xCDEF = vrshlq_s32(vacc3xCDEF, vright_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
int16x8_t vacc1x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF);
int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567);
int16x8_t vacc2x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF);
int16x8_t vacc3x01234567 = vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567);
int16x8_t vacc3x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc3x89AB), vacc3xCDEF);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
vacc1x89ABCDEF = vqaddq_s16(vacc1x89ABCDEF, voutput_zero_point);
vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
vacc2x89ABCDEF = vqaddq_s16(vacc2x89ABCDEF, voutput_zero_point);
vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);
vacc3x89ABCDEF = vqaddq_s16(vacc3x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
int8x16_t vout2x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc2x89ABCDEF);
int8x16_t vout3x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc3x01234567), vacc3x89ABCDEF);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
int16x8_t vacc1x89ABCDEF = vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF));
int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567));
int16x8_t vacc2x89ABCDEF = vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF));
int16x8_t vacc3x01234567 = vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567));
int16x8_t vacc3x89ABCDEF = vcombine_s16(vqmovn_s32(vacc3x89AB), vqmovn_s32(vacc3xCDEF));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
vacc1x89ABCDEF = vqaddq_s16(vacc1x89ABCDEF, voutput_zero_point);
vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
vacc2x89ABCDEF = vqaddq_s16(vacc2x89ABCDEF, voutput_zero_point);
vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);
vacc3x89ABCDEF = vqaddq_s16(vacc3x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
int8x16_t vout2x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc2x89ABCDEF));
int8x16_t vout3x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc3x01234567), vqmovn_s16(vacc3x89ABCDEF));
#endif
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->rndnu_neon.output_min);
vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
vout2x0123456789ABCDEF = vmaxq_s8(vout2x0123456789ABCDEF, voutput_min);
vout3x0123456789ABCDEF = vmaxq_s8(vout3x0123456789ABCDEF, voutput_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->rndnu_neon.output_max);
vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
vout2x0123456789ABCDEF = vminq_s8(vout2x0123456789ABCDEF, voutput_max);
vout3x0123456789ABCDEF = vminq_s8(vout3x0123456789ABCDEF, voutput_max);
if (nc >= 16) {
vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
vst1q_s8(c2 + 0, vout2x0123456789ABCDEF);
vst1q_s8(c3 + 0, vout3x0123456789ABCDEF);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
a2 = (const int8_t*) ((uintptr_t) a2 - kc);
a3 = (const int8_t*) ((uintptr_t) a3 - kc);
nc -= 16;
} else {
// Final case where not all of the 16 columns fit in the destination.
int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vget_low_s8(vout2x0123456789ABCDEF), vget_low_s8(vout3x0123456789ABCDEF));
if (nc & 8) {
vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
vst1_s8(c2, vget_low_s8(vout2x01234567_3x01234567)); c2 += 8;
vst1_s8(c3, vget_high_s8(vout2x01234567_3x01234567)); c3 += 8;
vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
vout2x01234567_3x01234567 = vcombine_s8(vget_high_s8(vout2x0123456789ABCDEF), vget_high_s8(vout3x0123456789ABCDEF));
}
if (nc & 4) {
vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
}
if (nc & 2) {
vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
}
if (nc & 1) {
vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
}
nc = 0;
}
} while (nc != 0);
}
| 22,402
| 52.46778
| 130
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x16c4-minmax-rndnu-neondot.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c4-neondot.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
// QS8 GEMM microkernel: computes a 4 (rows) x 16 (columns) tile of
// C = A*B + bias on int8 data, using Armv8.2 dot-product (SDOT) intrinsics
// with "rndnu" (pre-shift / doubling-multiply / rounding post-shift)
// requantization.
//
// Contract (standard XNNPACK GEMM layout, inferred from the visible code):
//  - a: activations; row pointers are a_stride bytes apart
//  - w: packed weights; each group of 16 columns starts with 16 int32
//    biases followed by int8 weights packed 4-deep along K ("c4" packing)
//  - c: outputs; row pointers are cm_stride bytes apart, and each finished
//    group of 16 columns advances the row pointers by cn_stride
//  - XNN_OOB_READS: the kernel may read a few bytes past the valid ends of
//    its inputs; callers guarantee that this is safe.
void xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4__neondot(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Weights are packed 4-deep along K, so the K extent is rounded up to a
  // multiple of 4 to match the packed layout.
  kc = round_up_po2(kc, 4 * sizeof(int8_t));
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  // When fewer than 4 rows remain (mr < 4), alias the out-of-range row
  // pointers to the previous row: the duplicated arithmetic is harmless and
  // the duplicated stores simply rewrite the same bytes.
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }

  // Loop over groups of 16 columns.
  do {
    // Initialize accumulators with bias. 16 bias values are loaded from the
    // weight matrix, at the start of the group of 16 columns, and broadcast
    // to all 4 rows.
    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const int32_t*) w + 4;
    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const int32_t*) w + 4;
    int32x4_t vacc1x0123 = vacc0x0123;
    int32x4_t vacc1x4567 = vacc0x4567;
    int32x4_t vacc1x89AB = vacc0x89AB;
    int32x4_t vacc1xCDEF = vacc0xCDEF;
    int32x4_t vacc2x0123 = vacc0x0123;
    int32x4_t vacc2x4567 = vacc0x4567;
    int32x4_t vacc2x89AB = vacc0x89AB;
    int32x4_t vacc2xCDEF = vacc0xCDEF;
    int32x4_t vacc3x0123 = vacc0x0123;
    int32x4_t vacc3x4567 = vacc0x4567;
    int32x4_t vacc3x89AB = vacc0x89AB;
    int32x4_t vacc3xCDEF = vacc0xCDEF;

    // Inner accumulation loop along the 16 columns.
    size_t k = kc;
    // 2x partial unrolled loop to load 8 bytes at a time.
    while (k >= 8 * sizeof(int8_t)) {
      // Load a 4x8 block of activations.
      const int8x8_t va0x01234567 = vld1_s8(a0); a0 += 8;
      const int8x8_t va1x01234567 = vld1_s8(a1); a1 += 8;
      const int8x8_t va2x01234567 = vld1_s8(a2); a2 += 8;
      const int8x8_t va3x01234567 = vld1_s8(a3); a3 += 8;

      // Load a 8x16 block of weights.
      const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
      const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
      const int8x16_t vb0123x89AB = vld1q_s8(w); w = (const int8_t*) w + 16;
      const int8x16_t vb0123xCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
      const int8x16_t vb4567x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
      const int8x16_t vb4567x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
      const int8x16_t vb4567x89AB = vld1q_s8(w); w = (const int8_t*) w + 16;
      const int8x16_t vb4567xCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;

      // Multiply-accumulate: 4x8 * 8x16 --> 4x16. Each vdotq_lane_s32
      // consumes 4 consecutive K values (one lane of the activation vector)
      // against 4x4 packed weights.
      vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0);
      vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0);
      vacc0x89AB = vdotq_lane_s32(vacc0x89AB, vb0123x89AB, va0x01234567, 0);
      vacc0xCDEF = vdotq_lane_s32(vacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
      vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb0123x0123, va1x01234567, 0);
      vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb0123x4567, va1x01234567, 0);
      vacc1x89AB = vdotq_lane_s32(vacc1x89AB, vb0123x89AB, va1x01234567, 0);
      vacc1xCDEF = vdotq_lane_s32(vacc1xCDEF, vb0123xCDEF, va1x01234567, 0);
      vacc2x0123 = vdotq_lane_s32(vacc2x0123, vb0123x0123, va2x01234567, 0);
      vacc2x4567 = vdotq_lane_s32(vacc2x4567, vb0123x4567, va2x01234567, 0);
      vacc2x89AB = vdotq_lane_s32(vacc2x89AB, vb0123x89AB, va2x01234567, 0);
      vacc2xCDEF = vdotq_lane_s32(vacc2xCDEF, vb0123xCDEF, va2x01234567, 0);
      vacc3x0123 = vdotq_lane_s32(vacc3x0123, vb0123x0123, va3x01234567, 0);
      vacc3x4567 = vdotq_lane_s32(vacc3x4567, vb0123x4567, va3x01234567, 0);
      vacc3x89AB = vdotq_lane_s32(vacc3x89AB, vb0123x89AB, va3x01234567, 0);
      vacc3xCDEF = vdotq_lane_s32(vacc3xCDEF, vb0123xCDEF, va3x01234567, 0);
      vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb4567x0123, va0x01234567, 1);
      vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb4567x4567, va0x01234567, 1);
      vacc0x89AB = vdotq_lane_s32(vacc0x89AB, vb4567x89AB, va0x01234567, 1);
      vacc0xCDEF = vdotq_lane_s32(vacc0xCDEF, vb4567xCDEF, va0x01234567, 1);
      vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb4567x0123, va1x01234567, 1);
      vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb4567x4567, va1x01234567, 1);
      vacc1x89AB = vdotq_lane_s32(vacc1x89AB, vb4567x89AB, va1x01234567, 1);
      vacc1xCDEF = vdotq_lane_s32(vacc1xCDEF, vb4567xCDEF, va1x01234567, 1);
      vacc2x0123 = vdotq_lane_s32(vacc2x0123, vb4567x0123, va2x01234567, 1);
      vacc2x4567 = vdotq_lane_s32(vacc2x4567, vb4567x4567, va2x01234567, 1);
      vacc2x89AB = vdotq_lane_s32(vacc2x89AB, vb4567x89AB, va2x01234567, 1);
      vacc2xCDEF = vdotq_lane_s32(vacc2xCDEF, vb4567xCDEF, va2x01234567, 1);
      vacc3x0123 = vdotq_lane_s32(vacc3x0123, vb4567x0123, va3x01234567, 1);
      vacc3x4567 = vdotq_lane_s32(vacc3x4567, vb4567x4567, va3x01234567, 1);
      vacc3x89AB = vdotq_lane_s32(vacc3x89AB, vb4567x89AB, va3x01234567, 1);
      vacc3xCDEF = vdotq_lane_s32(vacc3xCDEF, vb4567xCDEF, va3x01234567, 1);

      k -= 8 * sizeof(int8_t);
    }
    // Handle up to 4 final positions of `k`
    if XNN_UNLIKELY(k != 0) {
      // Load a 4x4 block of activations.
      // NOTE: vld1_s8 reads 8 bytes while only the first 4 (lane 0) are
      // consumed below; the extra bytes are covered by XNN_OOB_READS.
      const int8x8_t va0x01234567 = vld1_s8(a0); a0 += 4;
      const int8x8_t va1x01234567 = vld1_s8(a1); a1 += 4;
      const int8x8_t va2x01234567 = vld1_s8(a2); a2 += 4;
      const int8x8_t va3x01234567 = vld1_s8(a3); a3 += 4;

      // Load a 4x16 block of weights.
      const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
      const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
      const int8x16_t vb0123x89AB = vld1q_s8(w); w = (const int8_t*) w + 16;
      const int8x16_t vb0123xCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;

      // Multiply-accumulate: 4x4 * 4x16 --> 4x16.
      vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0);
      vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0);
      vacc0x89AB = vdotq_lane_s32(vacc0x89AB, vb0123x89AB, va0x01234567, 0);
      vacc0xCDEF = vdotq_lane_s32(vacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
      vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb0123x0123, va1x01234567, 0);
      vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb0123x4567, va1x01234567, 0);
      vacc1x89AB = vdotq_lane_s32(vacc1x89AB, vb0123x89AB, va1x01234567, 0);
      vacc1xCDEF = vdotq_lane_s32(vacc1xCDEF, vb0123xCDEF, va1x01234567, 0);
      vacc2x0123 = vdotq_lane_s32(vacc2x0123, vb0123x0123, va2x01234567, 0);
      vacc2x4567 = vdotq_lane_s32(vacc2x4567, vb0123x4567, va2x01234567, 0);
      vacc2x89AB = vdotq_lane_s32(vacc2x89AB, vb0123x89AB, va2x01234567, 0);
      vacc2xCDEF = vdotq_lane_s32(vacc2xCDEF, vb0123xCDEF, va2x01234567, 0);
      vacc3x0123 = vdotq_lane_s32(vacc3x0123, vb0123x0123, va3x01234567, 0);
      vacc3x4567 = vdotq_lane_s32(vacc3x4567, vb0123x4567, va3x01234567, 0);
      vacc3x89AB = vdotq_lane_s32(vacc3x89AB, vb0123x89AB, va3x01234567, 0);
      vacc3xCDEF = vdotq_lane_s32(vacc3xCDEF, vb0123xCDEF, va3x01234567, 0);
    }

    // rndnu requantization: saturating pre-shift, saturating doubling-high
    // multiply by the fixed-point multiplier, then rounding post-shift.
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
    vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);
    vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
    vacc1x89AB = vqshlq_s32(vacc1x89AB, vright_pre_shift);
    vacc1xCDEF = vqshlq_s32(vacc1xCDEF, vright_pre_shift);
    vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);
    vacc2x89AB = vqshlq_s32(vacc2x89AB, vright_pre_shift);
    vacc2xCDEF = vqshlq_s32(vacc2xCDEF, vright_pre_shift);
    vacc3x0123 = vqshlq_s32(vacc3x0123, vright_pre_shift);
    vacc3x4567 = vqshlq_s32(vacc3x4567, vright_pre_shift);
    vacc3x89AB = vqshlq_s32(vacc3x89AB, vright_pre_shift);
    vacc3xCDEF = vqshlq_s32(vacc3xCDEF, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
    vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier);
    vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier);
    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
    vacc3x89AB = vqdmulhq_s32(vacc3x89AB, vmultiplier);
    vacc3xCDEF = vqdmulhq_s32(vacc3xCDEF, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
    vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift);
    vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift);
    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
    vacc3x89AB = vrshlq_s32(vacc3x89AB, vright_post_shift);
    vacc3xCDEF = vrshlq_s32(vacc3xCDEF, vright_post_shift);

    // Narrow 32->16 bits with saturation, add the output zero point, then
    // narrow 16->8 bits with saturation. AArch64 has vqmovn_high_*; on
    // AArch32 the same result is built with vcombine_*.
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF), voutput_zero_point);
    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x89AB), vacc3xCDEF), voutput_zero_point);

    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
    int8x16_t vout2x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc2x89ABCDEF);
    int8x16_t vout3x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc3x01234567), vacc3x89ABCDEF);
#else
    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)), voutput_zero_point);
    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x89AB), vqmovn_s32(vacc3xCDEF)), voutput_zero_point);

    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
    int8x16_t vout2x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc2x89ABCDEF));
    int8x16_t vout3x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc3x01234567), vqmovn_s16(vacc3x89ABCDEF));
#endif

    // Clamp outputs to the requested [output_min, output_max] range.
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);

    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
    vout2x0123456789ABCDEF = vmaxq_s8(vout2x0123456789ABCDEF, voutput_min);
    vout3x0123456789ABCDEF = vmaxq_s8(vout3x0123456789ABCDEF, voutput_min);

    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
    vout2x0123456789ABCDEF = vminq_s8(vout2x0123456789ABCDEF, voutput_max);
    vout3x0123456789ABCDEF = vminq_s8(vout3x0123456789ABCDEF, voutput_max);

    if (nc >= 16) {
      // Main case where there the 16 columns fit in the destination.
      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
      vst1q_s8(c2 + 0, vout2x0123456789ABCDEF);
      vst1q_s8(c3 + 0, vout3x0123456789ABCDEF);

      // Advance to the next 16 columns.
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);

      // Rewind activation pointers to the start of each row (the K loop
      // advanced them by kc bytes).
      a0 = (const int8_t*) ((uintptr_t) a0 - kc)

;
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
      a3 = (const int8_t*) ((uintptr_t) a3 - kc);

      nc -= 16;
    } else {
      // Final case where not all of the 16 columns fit in the destination.
      // Rows are paired (0|1, 2|3) so partial stores can use q-register
      // lanes; each `nc & N` step stores N columns per row and shifts the
      // remaining bytes down with vextq_s8.
      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
      int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vget_low_s8(vout2x0123456789ABCDEF), vget_low_s8(vout3x0123456789ABCDEF));
      if (nc & 8) {
        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
        vst1_s8(c2, vget_low_s8(vout2x01234567_3x01234567)); c2 += 8;
        vst1_s8(c3, vget_high_s8(vout2x01234567_3x01234567)); c3 += 8;
        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
        vout2x01234567_3x01234567 = vcombine_s8(vget_high_s8(vout2x0123456789ABCDEF), vget_high_s8(vout3x0123456789ABCDEF));
      }
      if (nc & 4) {
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
        vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
      }
      if (nc & 2) {
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
        vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
      }
      if (nc & 1) {
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
      }

      nc = 0;
    }
  } while (nc != 0);
}
| 18,194
| 54.642202
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x2-minmax-fp32-scalar-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QS8 GEMM microkernel: computes a 4 (rows) x 2 (columns) tile of
// C = A*B + bias on int8 data, in plain scalar C, using fp32 ("fmagic")
// requantization: the int32 accumulator is scaled in float, clamped, and
// converted back to an integer via the magic-bias bit trick.
void xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);

  // Per-row activation/output pointers. Rows past mr alias the previous
  // row, so their work is redundant but harmless (same bytes rewritten).
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }

  do {
    // Broadcast the two per-column int32 biases into a 4x2 accumulator tile.
    int32_t vacc[4][2];
    vacc[0][0] = unaligned_indexed_load_s32(w, 0);
    vacc[0][1] = unaligned_indexed_load_s32(w, 1);
    for (size_t m = 1; m < 4; m++) {
      vacc[m][0] = vacc[0][0];
      vacc[m][1] = vacc[0][1];
    }
    w = (const int32_t*) w + 2;

    // K loop: one int8 activation per row and two int8 weights per step.
    size_t k = kc;
    do {
      const int32_t va[4] = {
        (int32_t) *a0++,
        (int32_t) *a1++,
        (int32_t) *a2++,
        (int32_t) *a3++,
      };
      const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
      const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
      w = (const int8_t*) w + 2;

      for (size_t m = 0; m < 4; m++) {
        vacc[m][0] += va[m] * vb0;
        vacc[m][1] += va[m] * vb1;
      }

      k -= sizeof(int8_t);
    } while (k != 0);

    // fp32 "fmagic" requantization: convert to float, scale, clamp to the
    // zero-point-relative output range, then add a magic bias so the
    // rounded integer result can be recovered from the float's bit pattern
    // with a single subtraction.
    const float vscale = params->fp32_scalar_fmagic.scale;
    const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
    const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
    const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
    const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;

    int32_t vout[4][2];
    for (size_t m = 0; m < 4; m++) {
      for (size_t j = 0; j < 2; j++) {
        float vfpacc = (float) vacc[m][j] * vscale;
        vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
        vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
        vfpacc += vmagic_bias;
        vout[m][j] = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
      }
    }

    if XNN_LIKELY(nc >= 2) {
      // Full 2-column store for all four rows.
      c0[0] = (int8_t) vout[0][0];
      c0[1] = (int8_t) vout[0][1];
      c1[0] = (int8_t) vout[1][0];
      c1[1] = (int8_t) vout[1][1];
      c2[0] = (int8_t) vout[2][0];
      c2[1] = (int8_t) vout[2][1];
      c3[0] = (int8_t) vout[3][0];
      c3[1] = (int8_t) vout[3][1];

      // Rewind activations to the start of each row and advance output
      // pointers to the next pair of columns.
      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
      a3 = (const int8_t*) ((uintptr_t) a3 - kc);

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);

      nc -= 2;
    } else {
      // Tail: a single remaining column.
      if (nc & 1) {
        c0[0] = (int8_t) vout[0][0];
        c1[0] = (int8_t) vout[1][0];
        c2[0] = (int8_t) vout[2][0];
        c3[0] = (int8_t) vout[3][0];
      }

      nc = 0;
    }
  } while (nc != 0);
}
| 6,383
| 34.270718
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x2-minmax-fp32-scalar-imagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// Signed 8-bit integer GEMM microkernel: 4 output rows (MR=4) x 2 output
// columns (NR=2), portable scalar code. Requantization uses the fp32
// "imagic" (integer magic-bias) scheme: accumulate in int32, scale in
// float, add a magic bias so the rounded result lands in the low bits of
// the float representation, then clamp and unbias in the integer domain.
//
// a         - input rows (int8), a_stride bytes apart
// w         - packed weights: NR int32 biases followed by interleaved int8
//             weight values, repeated per output-column tile
// c         - output rows (int8), cm_stride bytes apart; cn_stride advances
//             a row pointer to the next NR-column tile
// params    - requantization constants (fp32_scalar_imagic variant)
void xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_imagic(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  // Per-row input/output pointers. Rows beyond mr alias the previous row,
  // so the hot loop always processes 4 rows branch-free; duplicate rows
  // just overwrite the same memory with the same values.
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }
  do {
    // Initialize all 4 rows' accumulators from the 2 packed int32 biases.
    // The loads are unaligned because int8 weight data precedes them in w.
    int32_t vacc0x0 = unaligned_indexed_load_s32(w, 0);
    int32_t vacc0x1 = unaligned_indexed_load_s32(w, 1);
    int32_t vacc1x0 = vacc0x0;
    int32_t vacc1x1 = vacc0x1;
    int32_t vacc2x0 = vacc0x0;
    int32_t vacc2x1 = vacc0x1;
    int32_t vacc3x0 = vacc0x0;
    int32_t vacc3x1 = vacc0x1;
    w = (const int32_t*) w + 2;
    size_t k = kc;
    do {
      // Multiply-accumulate: one int8 element from each of the 4 A rows
      // against the 2 packed int8 weights for this k step.
      const int32_t va0 = (int32_t) *a0++;
      const int32_t va1 = (int32_t) *a1++;
      const int32_t va2 = (int32_t) *a2++;
      const int32_t va3 = (int32_t) *a3++;
      const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
      const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
      w = (const int8_t*) w + 2;
      vacc0x0 += va0 * vb0;
      vacc0x1 += va0 * vb1;
      vacc1x0 += va1 * vb0;
      vacc1x1 += va1 * vb1;
      vacc2x0 += va2 * vb0;
      vacc2x1 += va2 * vb1;
      vacc3x0 += va3 * vb0;
      vacc3x1 += va3 * vb1;
      k -= sizeof(int8_t);
    } while (k != 0);
    // Requantization: convert the int32 accumulators to float and scale.
    float vfpacc0x0 = (float) vacc0x0;
    float vfpacc0x1 = (float) vacc0x1;
    float vfpacc1x0 = (float) vacc1x0;
    float vfpacc1x1 = (float) vacc1x1;
    float vfpacc2x0 = (float) vacc2x0;
    float vfpacc2x1 = (float) vacc2x1;
    float vfpacc3x0 = (float) vacc3x0;
    float vfpacc3x1 = (float) vacc3x1;
    const float vscale = params->fp32_scalar_imagic.scale;
    vfpacc0x0 *= vscale;
    vfpacc0x1 *= vscale;
    vfpacc1x0 *= vscale;
    vfpacc1x1 *= vscale;
    vfpacc2x0 *= vscale;
    vfpacc2x1 *= vscale;
    vfpacc3x0 *= vscale;
    vfpacc3x1 *= vscale;
    // Add the magic bias: afterwards the rounded integer result occupies
    // the low bits of the float's IEEE-754 representation.
    const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
    vfpacc0x0 += vmagic_bias;
    vfpacc0x1 += vmagic_bias;
    vfpacc1x0 += vmagic_bias;
    vfpacc1x1 += vmagic_bias;
    vfpacc2x0 += vmagic_bias;
    vfpacc2x1 += vmagic_bias;
    vfpacc3x0 += vmagic_bias;
    vfpacc3x1 += vmagic_bias;
    // Reinterpret the float bits as int32 (no conversion instruction needed).
    int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0);
    int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1);
    int32_t vout1x0 = (int32_t) float_as_uint32(vfpacc1x0);
    int32_t vout1x1 = (int32_t) float_as_uint32(vfpacc1x1);
    int32_t vout2x0 = (int32_t) float_as_uint32(vfpacc2x0);
    int32_t vout2x1 = (int32_t) float_as_uint32(vfpacc2x1);
    int32_t vout3x0 = (int32_t) float_as_uint32(vfpacc3x0);
    int32_t vout3x1 = (int32_t) float_as_uint32(vfpacc3x1);
    // Clamp in the integer domain against the magic-biased output min/max.
    const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
    vout0x0 = math_max_s32(vout0x0, vmagic_min);
    vout0x1 = math_max_s32(vout0x1, vmagic_min);
    vout1x0 = math_max_s32(vout1x0, vmagic_min);
    vout1x1 = math_max_s32(vout1x1, vmagic_min);
    vout2x0 = math_max_s32(vout2x0, vmagic_min);
    vout2x1 = math_max_s32(vout2x1, vmagic_min);
    vout3x0 = math_max_s32(vout3x0, vmagic_min);
    vout3x1 = math_max_s32(vout3x1, vmagic_min);
    const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
    vout0x0 = math_min_s32(vout0x0, vmagic_max);
    vout0x1 = math_min_s32(vout0x1, vmagic_max);
    vout1x0 = math_min_s32(vout1x0, vmagic_max);
    vout1x1 = math_min_s32(vout1x1, vmagic_max);
    vout2x0 = math_min_s32(vout2x0, vmagic_max);
    vout2x1 = math_min_s32(vout2x1, vmagic_max);
    vout3x0 = math_min_s32(vout3x0, vmagic_max);
    vout3x1 = math_min_s32(vout3x1, vmagic_max);
    // Remove the magic bias and apply the output zero point in a single
    // pre-combined subtraction.
    const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
    vout0x0 -= vmagic_bias_less_zero_point;
    vout0x1 -= vmagic_bias_less_zero_point;
    vout1x0 -= vmagic_bias_less_zero_point;
    vout1x1 -= vmagic_bias_less_zero_point;
    vout2x0 -= vmagic_bias_less_zero_point;
    vout2x1 -= vmagic_bias_less_zero_point;
    vout3x0 -= vmagic_bias_less_zero_point;
    vout3x1 -= vmagic_bias_less_zero_point;
    if XNN_LIKELY(nc >= 2) {
      // Full 2-column tile: store both columns for all 4 rows, rewind the
      // input pointers by kc, and advance output pointers to the next tile.
      c0[0] = (int8_t) vout0x0;
      c0[1] = (int8_t) vout0x1;
      c1[0] = (int8_t) vout1x0;
      c1[1] = (int8_t) vout1x1;
      c2[0] = (int8_t) vout2x0;
      c2[1] = (int8_t) vout2x1;
      c3[0] = (int8_t) vout3x0;
      c3[1] = (int8_t) vout3x1;
      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
      a3 = (const int8_t*) ((uintptr_t) a3 - kc);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      nc -= 2;
    } else {
      // Remainder tile: only one output column is valid.
      if (nc & 1) {
        c0[0] = (int8_t) vout0x0;
        c1[0] = (int8_t) vout1x0;
        c2[0] = (int8_t) vout2x0;
        c3[0] = (int8_t) vout3x0;
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 6,026
| 30.721053
| 102
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x2-minmax-fp32-scalar-lrintf.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// Signed 8-bit integer GEMM microkernel: 4 output rows (MR=4) x 2 output
// columns (NR=2), portable scalar code. Requantization uses the fp32
// "lrintf" scheme: accumulate in int32, scale and clamp in float, then
// round to the nearest integer with lrintf() and add the output zero point.
//
// a      - input rows (int8), a_stride bytes apart
// w      - packed weights: NR int32 biases followed by interleaved int8
//          weight values, repeated per output-column tile
// c      - output rows (int8), cm_stride bytes apart; cn_stride advances
//          a row pointer to the next NR-column tile
// params - requantization constants (fp32_scalar_lrintf variant)
void xnn_qs8_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  // Per-row input/output pointers. Rows beyond mr alias the previous row,
  // so the hot loop always processes 4 rows branch-free.
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }
  do {
    // Initialize all 4 rows' accumulators from the 2 packed int32 biases
    // (unaligned loads: int8 weight data precedes them in w).
    int32_t vacc0x0 = unaligned_indexed_load_s32(w, 0);
    int32_t vacc0x1 = unaligned_indexed_load_s32(w, 1);
    int32_t vacc1x0 = vacc0x0;
    int32_t vacc1x1 = vacc0x1;
    int32_t vacc2x0 = vacc0x0;
    int32_t vacc2x1 = vacc0x1;
    int32_t vacc3x0 = vacc0x0;
    int32_t vacc3x1 = vacc0x1;
    w = (const int32_t*) w + 2;
    size_t k = kc;
    do {
      // Multiply-accumulate: one int8 element from each of the 4 A rows
      // against the 2 packed int8 weights for this k step.
      const int32_t va0 = (int32_t) *a0++;
      const int32_t va1 = (int32_t) *a1++;
      const int32_t va2 = (int32_t) *a2++;
      const int32_t va3 = (int32_t) *a3++;
      const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
      const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
      w = (const int8_t*) w + 2;
      vacc0x0 += va0 * vb0;
      vacc0x1 += va0 * vb1;
      vacc1x0 += va1 * vb0;
      vacc1x1 += va1 * vb1;
      vacc2x0 += va2 * vb0;
      vacc2x1 += va2 * vb1;
      vacc3x0 += va3 * vb0;
      vacc3x1 += va3 * vb1;
      k -= sizeof(int8_t);
    } while (k != 0);
    // Requantization: convert the int32 accumulators to float and scale.
    float vfpacc0x0 = (float) vacc0x0;
    float vfpacc0x1 = (float) vacc0x1;
    float vfpacc1x0 = (float) vacc1x0;
    float vfpacc1x1 = (float) vacc1x1;
    float vfpacc2x0 = (float) vacc2x0;
    float vfpacc2x1 = (float) vacc2x1;
    float vfpacc3x0 = (float) vacc3x0;
    float vfpacc3x1 = (float) vacc3x1;
    const float vscale = params->fp32_scalar_lrintf.scale;
    vfpacc0x0 *= vscale;
    vfpacc0x1 *= vscale;
    vfpacc1x0 *= vscale;
    vfpacc1x1 *= vscale;
    vfpacc2x0 *= vscale;
    vfpacc2x1 *= vscale;
    vfpacc3x0 *= vscale;
    vfpacc3x1 *= vscale;
    // Clamp in the float domain; the bounds are pre-offset by the output
    // zero point so the clamp happens before the zero point is added back.
    const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
    vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
    vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
    vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
    vfpacc1x1 = math_max_f32(vfpacc1x1, voutput_min_less_zero_point);
    vfpacc2x0 = math_max_f32(vfpacc2x0, voutput_min_less_zero_point);
    vfpacc2x1 = math_max_f32(vfpacc2x1, voutput_min_less_zero_point);
    vfpacc3x0 = math_max_f32(vfpacc3x0, voutput_min_less_zero_point);
    vfpacc3x1 = math_max_f32(vfpacc3x1, voutput_min_less_zero_point);
    const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
    vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
    vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
    vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
    vfpacc1x1 = math_min_f32(vfpacc1x1, voutput_max_less_zero_point);
    vfpacc2x0 = math_min_f32(vfpacc2x0, voutput_max_less_zero_point);
    vfpacc2x1 = math_min_f32(vfpacc2x1, voutput_max_less_zero_point);
    vfpacc3x0 = math_min_f32(vfpacc3x0, voutput_max_less_zero_point);
    vfpacc3x1 = math_min_f32(vfpacc3x1, voutput_max_less_zero_point);
    // Round to the nearest integer (current rounding mode, typically
    // round-to-nearest-even) via lrintf.
    const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
    const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
    const int32_t vrndacc1x0 = (int32_t) lrintf(vfpacc1x0);
    const int32_t vrndacc1x1 = (int32_t) lrintf(vfpacc1x1);
    const int32_t vrndacc2x0 = (int32_t) lrintf(vfpacc2x0);
    const int32_t vrndacc2x1 = (int32_t) lrintf(vfpacc2x1);
    const int32_t vrndacc3x0 = (int32_t) lrintf(vfpacc3x0);
    const int32_t vrndacc3x1 = (int32_t) lrintf(vfpacc3x1);
    // Re-add the output zero point after rounding.
    const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
    int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
    int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
    int32_t vout1x0 = vrndacc1x0 + voutput_zero_point;
    int32_t vout1x1 = vrndacc1x1 + voutput_zero_point;
    int32_t vout2x0 = vrndacc2x0 + voutput_zero_point;
    int32_t vout2x1 = vrndacc2x1 + voutput_zero_point;
    int32_t vout3x0 = vrndacc3x0 + voutput_zero_point;
    int32_t vout3x1 = vrndacc3x1 + voutput_zero_point;
    if XNN_LIKELY(nc >= 2) {
      // Full 2-column tile: store both columns for all 4 rows, rewind the
      // input pointers by kc, and advance output pointers to the next tile.
      c0[0] = (int8_t) vout0x0;
      c0[1] = (int8_t) vout0x1;
      c1[0] = (int8_t) vout1x0;
      c1[1] = (int8_t) vout1x1;
      c2[0] = (int8_t) vout2x0;
      c2[1] = (int8_t) vout2x1;
      c3[0] = (int8_t) vout3x0;
      c3[1] = (int8_t) vout3x1;
      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
      a3 = (const int8_t*) ((uintptr_t) a3 - kc);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      nc -= 2;
    } else {
      // Remainder tile: only one output column is valid.
      if (nc & 1) {
        c0[0] = (int8_t) vout0x0;
        c1[0] = (int8_t) vout1x0;
        c2[0] = (int8_t) vout2x0;
        c3[0] = (int8_t) vout3x0;
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 6,204
| 33.281768
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x2-minmax-fp32-wasm-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// Signed 8-bit integer GEMM microkernel: 4 output rows (MR=4) x 2 output
// columns (NR=2), scalar code targeting WebAssembly. Requantization uses
// the fp32 "fmagic" (float magic-bias) scheme: accumulate in int32, scale
// and clamp in float using the wasm min/max builtins, add a magic bias so
// the float's integer bits hold the rounded result, then reinterpret and
// unbias in the integer domain.
//
// a      - input rows (int8), a_stride bytes apart
// w      - packed weights: NR int32 biases followed by interleaved int8
//          weight values, repeated per output-column tile
// c      - output rows (int8), cm_stride bytes apart; cn_stride advances
//          a row pointer to the next NR-column tile
// params - requantization constants (fp32_scalar_fmagic variant)
void xnn_qs8_gemm_minmax_fp32_ukernel_4x2__wasm_fmagic(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  // Per-row input/output pointers. Rows beyond mr alias the previous row,
  // so the hot loop always processes 4 rows branch-free.
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }
  do {
    // Initialize all 4 rows' accumulators from the 2 packed int32 biases
    // (unaligned loads: int8 weight data precedes them in w).
    int32_t vacc0x0 = unaligned_indexed_load_s32(w, 0);
    int32_t vacc0x1 = unaligned_indexed_load_s32(w, 1);
    int32_t vacc1x0 = vacc0x0;
    int32_t vacc1x1 = vacc0x1;
    int32_t vacc2x0 = vacc0x0;
    int32_t vacc2x1 = vacc0x1;
    int32_t vacc3x0 = vacc0x0;
    int32_t vacc3x1 = vacc0x1;
    w = (const int32_t*) w + 2;
    size_t k = kc;
    do {
      // Multiply-accumulate: one int8 element from each of the 4 A rows
      // against the 2 packed int8 weights for this k step.
      const int32_t va0 = (int32_t) *a0++;
      const int32_t va1 = (int32_t) *a1++;
      const int32_t va2 = (int32_t) *a2++;
      const int32_t va3 = (int32_t) *a3++;
      const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
      const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
      w = (const int8_t*) w + 2;
      vacc0x0 += va0 * vb0;
      vacc0x1 += va0 * vb1;
      vacc1x0 += va1 * vb0;
      vacc1x1 += va1 * vb1;
      vacc2x0 += va2 * vb0;
      vacc2x1 += va2 * vb1;
      vacc3x0 += va3 * vb0;
      vacc3x1 += va3 * vb1;
      k -= sizeof(int8_t);
    } while (k != 0);
    // Requantization: convert the int32 accumulators to float and scale.
    float vfpacc0x0 = (float) vacc0x0;
    float vfpacc0x1 = (float) vacc0x1;
    float vfpacc1x0 = (float) vacc1x0;
    float vfpacc1x1 = (float) vacc1x1;
    float vfpacc2x0 = (float) vacc2x0;
    float vfpacc2x1 = (float) vacc2x1;
    float vfpacc3x0 = (float) vacc3x0;
    float vfpacc3x1 = (float) vacc3x1;
    const float vscale = params->fp32_scalar_fmagic.scale;
    vfpacc0x0 *= vscale;
    vfpacc0x1 *= vscale;
    vfpacc1x0 *= vscale;
    vfpacc1x1 *= vscale;
    vfpacc2x0 *= vscale;
    vfpacc2x1 *= vscale;
    vfpacc3x0 *= vscale;
    vfpacc3x1 *= vscale;
    // Clamp in the float domain using wasm f32.min/f32.max builtins; the
    // bounds are pre-offset by the output zero point.
    const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
    vfpacc0x0 = __builtin_wasm_max_f32(vfpacc0x0, voutput_min_less_zero_point);
    vfpacc0x1 = __builtin_wasm_max_f32(vfpacc0x1, voutput_min_less_zero_point);
    vfpacc1x0 = __builtin_wasm_max_f32(vfpacc1x0, voutput_min_less_zero_point);
    vfpacc1x1 = __builtin_wasm_max_f32(vfpacc1x1, voutput_min_less_zero_point);
    vfpacc2x0 = __builtin_wasm_max_f32(vfpacc2x0, voutput_min_less_zero_point);
    vfpacc2x1 = __builtin_wasm_max_f32(vfpacc2x1, voutput_min_less_zero_point);
    vfpacc3x0 = __builtin_wasm_max_f32(vfpacc3x0, voutput_min_less_zero_point);
    vfpacc3x1 = __builtin_wasm_max_f32(vfpacc3x1, voutput_min_less_zero_point);
    const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
    vfpacc0x0 = __builtin_wasm_min_f32(vfpacc0x0, voutput_max_less_zero_point);
    vfpacc0x1 = __builtin_wasm_min_f32(vfpacc0x1, voutput_max_less_zero_point);
    vfpacc1x0 = __builtin_wasm_min_f32(vfpacc1x0, voutput_max_less_zero_point);
    vfpacc1x1 = __builtin_wasm_min_f32(vfpacc1x1, voutput_max_less_zero_point);
    vfpacc2x0 = __builtin_wasm_min_f32(vfpacc2x0, voutput_max_less_zero_point);
    vfpacc2x1 = __builtin_wasm_min_f32(vfpacc2x1, voutput_max_less_zero_point);
    vfpacc3x0 = __builtin_wasm_min_f32(vfpacc3x0, voutput_max_less_zero_point);
    vfpacc3x1 = __builtin_wasm_min_f32(vfpacc3x1, voutput_max_less_zero_point);
    // Add the magic bias: afterwards the rounded integer result occupies
    // the low bits of the float's IEEE-754 representation.
    const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
    vfpacc0x0 += vmagic_bias;
    vfpacc0x1 += vmagic_bias;
    vfpacc1x0 += vmagic_bias;
    vfpacc1x1 += vmagic_bias;
    vfpacc2x0 += vmagic_bias;
    vfpacc2x1 += vmagic_bias;
    vfpacc3x0 += vmagic_bias;
    vfpacc3x1 += vmagic_bias;
    // Reinterpret the float bits as int32, then remove the magic bias and
    // apply the output zero point in a single pre-combined subtraction.
    const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
    vout0x0 = (int32_t) float_as_uint32(vfpacc0x0) - vmagic_bias_less_output_zero_point;
    int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1) - vmagic_bias_less_output_zero_point;
    int32_t vout1x0 = (int32_t) float_as_uint32(vfpacc1x0) - vmagic_bias_less_output_zero_point;
    int32_t vout1x1 = (int32_t) float_as_uint32(vfpacc1x1) - vmagic_bias_less_output_zero_point;
    int32_t vout2x0 = (int32_t) float_as_uint32(vfpacc2x0) - vmagic_bias_less_output_zero_point;
    int32_t vout2x1 = (int32_t) float_as_uint32(vfpacc2x1) - vmagic_bias_less_output_zero_point;
    int32_t vout3x0 = (int32_t) float_as_uint32(vfpacc3x0) - vmagic_bias_less_output_zero_point;
    int32_t vout3x1 = (int32_t) float_as_uint32(vfpacc3x1) - vmagic_bias_less_output_zero_point;
    if XNN_LIKELY(nc >= 2) {
      // Full 2-column tile: store both columns for all 4 rows, rewind the
      // input pointers by kc, and advance output pointers to the next tile.
      c0[0] = (int8_t) vout0x0;
      c0[1] = (int8_t) vout0x1;
      c1[0] = (int8_t) vout1x0;
      c1[1] = (int8_t) vout1x1;
      c2[0] = (int8_t) vout2x0;
      c2[1] = (int8_t) vout2x1;
      c3[0] = (int8_t) vout3x0;
      c3[1] = (int8_t) vout3x1;
      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
      a3 = (const int8_t*) ((uintptr_t) a3 - kc);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      nc -= 2;
    } else {
      // Remainder tile: only one output column is valid.
      if (nc & 1) {
        c0[0] = (int8_t) vout0x0;
        c1[0] = (int8_t) vout1x0;
        c2[0] = (int8_t) vout2x0;
        c3[0] = (int8_t) vout3x0;
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 6,541
| 35.143646
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x2-minmax-rndnu-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// Signed 8-bit integer GEMM microkernel: 4 output rows (MR=4) x 2 output
// columns (NR=2), portable scalar code. Requantization uses the "rndnu"
// (round-to-nearest-up) fixed-point scheme: widen each int32 accumulator to
// int64 via a multiplier, add a rounding constant, arithmetic-shift right,
// clamp, and add the output zero point.
//
// a      - input rows (int8), a_stride bytes apart
// w      - packed weights: NR int32 biases followed by interleaved int8
//          weight values, repeated per output-column tile
// c      - output rows (int8), cm_stride bytes apart; cn_stride advances
//          a row pointer to the next NR-column tile
// params - requantization constants (rndnu_scalar variant)
void xnn_qs8_gemm_minmax_rndnu_ukernel_4x2__scalar(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  // Per-row input/output pointers. Rows beyond mr alias the previous row,
  // so the hot loop always processes 4 rows branch-free.
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }
  do {
    // Initialize all 4 rows' accumulators from the 2 packed int32 biases
    // (unaligned loads: int8 weight data precedes them in w).
    int32_t vacc0x0 = unaligned_indexed_load_s32(w, 0);
    int32_t vacc0x1 = unaligned_indexed_load_s32(w, 1);
    int32_t vacc1x0 = vacc0x0;
    int32_t vacc1x1 = vacc0x1;
    int32_t vacc2x0 = vacc0x0;
    int32_t vacc2x1 = vacc0x1;
    int32_t vacc3x0 = vacc0x0;
    int32_t vacc3x1 = vacc0x1;
    w = (const int32_t*) w + 2;
    size_t k = kc;
    do {
      // Multiply-accumulate: one int8 element from each of the 4 A rows
      // against the 2 packed int8 weights for this k step.
      const int32_t va0 = (int32_t) *a0++;
      const int32_t va1 = (int32_t) *a1++;
      const int32_t va2 = (int32_t) *a2++;
      const int32_t va3 = (int32_t) *a3++;
      const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
      const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
      w = (const int8_t*) w + 2;
      vacc0x0 += va0 * vb0;
      vacc0x1 += va0 * vb1;
      vacc1x0 += va1 * vb0;
      vacc1x1 += va1 * vb1;
      vacc2x0 += va2 * vb0;
      vacc2x1 += va2 * vb1;
      vacc3x0 += va3 * vb0;
      vacc3x1 += va3 * vb1;
      k -= sizeof(int8_t);
    } while (k != 0);
    // Fixed-point requantization: widening 32x32->64-bit multiply by the
    // requantization multiplier, plus a pre-added rounding constant.
    const int32_t vmultiplier = params->rndnu_scalar.multiplier;
    const int64_t vrounding = params->rndnu_scalar.rounding;
    const int64_t vextacc0x0 = math_mulext_s32(vacc0x0, vmultiplier) + vrounding;
    const int64_t vextacc0x1 = math_mulext_s32(vacc0x1, vmultiplier) + vrounding;
    const int64_t vextacc1x0 = math_mulext_s32(vacc1x0, vmultiplier) + vrounding;
    const int64_t vextacc1x1 = math_mulext_s32(vacc1x1, vmultiplier) + vrounding;
    const int64_t vextacc2x0 = math_mulext_s32(vacc2x0, vmultiplier) + vrounding;
    const int64_t vextacc2x1 = math_mulext_s32(vacc2x1, vmultiplier) + vrounding;
    const int64_t vextacc3x0 = math_mulext_s32(vacc3x0, vmultiplier) + vrounding;
    const int64_t vextacc3x1 = math_mulext_s32(vacc3x1, vmultiplier) + vrounding;
    // Arithmetic shift right completes the round-to-nearest-up scaling.
    const uint32_t vshift = params->rndnu_scalar.shift;
    vout0x0 = (int32_t) math_asr_s64(vextacc0x0, vshift);
    int32_t vout0x1 = (int32_t) math_asr_s64(vextacc0x1, vshift);
    int32_t vout1x0 = (int32_t) math_asr_s64(vextacc1x0, vshift);
    int32_t vout1x1 = (int32_t) math_asr_s64(vextacc1x1, vshift);
    int32_t vout2x0 = (int32_t) math_asr_s64(vextacc2x0, vshift);
    int32_t vout2x1 = (int32_t) math_asr_s64(vextacc2x1, vshift);
    int32_t vout3x0 = (int32_t) math_asr_s64(vextacc3x0, vshift);
    int32_t vout3x1 = (int32_t) math_asr_s64(vextacc3x1, vshift);
    // Clamp against output min/max, both pre-offset by the zero point.
    const int32_t voutput_min_less_zero_point = params->rndnu_scalar.output_min_less_zero_point;
    vout0x0 = math_max_s32(vout0x0, voutput_min_less_zero_point);
    vout0x1 = math_max_s32(vout0x1, voutput_min_less_zero_point);
    vout1x0 = math_max_s32(vout1x0, voutput_min_less_zero_point);
    vout1x1 = math_max_s32(vout1x1, voutput_min_less_zero_point);
    vout2x0 = math_max_s32(vout2x0, voutput_min_less_zero_point);
    vout2x1 = math_max_s32(vout2x1, voutput_min_less_zero_point);
    vout3x0 = math_max_s32(vout3x0, voutput_min_less_zero_point);
    vout3x1 = math_max_s32(vout3x1, voutput_min_less_zero_point);
    const int32_t voutput_max_less_zero_point = params->rndnu_scalar.output_max_less_zero_point;
    vout0x0 = math_min_s32(vout0x0, voutput_max_less_zero_point);
    vout0x1 = math_min_s32(vout0x1, voutput_max_less_zero_point);
    vout1x0 = math_min_s32(vout1x0, voutput_max_less_zero_point);
    vout1x1 = math_min_s32(vout1x1, voutput_max_less_zero_point);
    vout2x0 = math_min_s32(vout2x0, voutput_max_less_zero_point);
    vout2x1 = math_min_s32(vout2x1, voutput_max_less_zero_point);
    vout3x0 = math_min_s32(vout3x0, voutput_max_less_zero_point);
    vout3x1 = math_min_s32(vout3x1, voutput_max_less_zero_point);
    // Re-add the output zero point after clamping.
    const int32_t voutput_zero_point = params->rndnu_scalar.output_zero_point;
    vout0x0 += voutput_zero_point;
    vout0x1 += voutput_zero_point;
    vout1x0 += voutput_zero_point;
    vout1x1 += voutput_zero_point;
    vout2x0 += voutput_zero_point;
    vout2x1 += voutput_zero_point;
    vout3x0 += voutput_zero_point;
    vout3x1 += voutput_zero_point;
    if XNN_LIKELY(nc >= 2) {
      // Full 2-column tile: store both columns for all 4 rows, rewind the
      // input pointers by kc, and advance output pointers to the next tile.
      c0[0] = (int8_t) vout0x0;
      c0[1] = (int8_t) vout0x1;
      c1[0] = (int8_t) vout1x0;
      c1[1] = (int8_t) vout1x1;
      c2[0] = (int8_t) vout2x0;
      c2[1] = (int8_t) vout2x1;
      c3[0] = (int8_t) vout3x0;
      c3[1] = (int8_t) vout3x1;
      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
      a3 = (const int8_t*) ((uintptr_t) a3 - kc);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      nc -= 2;
    } else {
      // Remainder tile: only one output column is valid.
      if (nc & 1) {
        c0[0] = (int8_t) vout0x0;
        c1[0] = (int8_t) vout1x0;
        c2[0] = (int8_t) vout2x0;
        c3[0] = (int8_t) vout3x0;
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 6,256
| 35.16763
| 96
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x4-minmax-fp32-scalar-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
const int8_t* a0 = a;
int8_t* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
int32_t vacc0x2 = ((const int32_t*) w)[2];
int32_t vacc0x3 = ((const int32_t*) w)[3];
int32_t vacc1x0 = vacc0x0;
int32_t vacc1x1 = vacc0x1;
int32_t vacc1x2 = vacc0x2;
int32_t vacc1x3 = vacc0x3;
int32_t vacc2x0 = vacc0x0;
int32_t vacc2x1 = vacc0x1;
int32_t vacc2x2 = vacc0x2;
int32_t vacc2x3 = vacc0x3;
int32_t vacc3x0 = vacc0x0;
int32_t vacc3x1 = vacc0x1;
int32_t vacc3x2 = vacc0x2;
int32_t vacc3x3 = vacc0x3;
w = (const int32_t*) w + 4;
size_t k = kc;
do {
const int32_t va0 = (int32_t) *a0++;
const int32_t va1 = (int32_t) *a1++;
const int32_t va2 = (int32_t) *a2++;
const int32_t va3 = (int32_t) *a3++;
const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
w = (const int8_t*) w + 4;
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
vacc0x2 += va0 * vb2;
vacc0x3 += va0 * vb3;
vacc1x0 += va1 * vb0;
vacc1x1 += va1 * vb1;
vacc1x2 += va1 * vb2;
vacc1x3 += va1 * vb3;
vacc2x0 += va2 * vb0;
vacc2x1 += va2 * vb1;
vacc2x2 += va2 * vb2;
vacc2x3 += va2 * vb3;
vacc3x0 += va3 * vb0;
vacc3x1 += va3 * vb1;
vacc3x2 += va3 * vb2;
vacc3x3 += va3 * vb3;
k -= sizeof(int8_t);
} while (k != 0);
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
float vfpacc0x2 = (float) vacc0x2;
float vfpacc0x3 = (float) vacc0x3;
float vfpacc1x0 = (float) vacc1x0;
float vfpacc1x1 = (float) vacc1x1;
float vfpacc1x2 = (float) vacc1x2;
float vfpacc1x3 = (float) vacc1x3;
float vfpacc2x0 = (float) vacc2x0;
float vfpacc2x1 = (float) vacc2x1;
float vfpacc2x2 = (float) vacc2x2;
float vfpacc2x3 = (float) vacc2x3;
float vfpacc3x0 = (float) vacc3x0;
float vfpacc3x1 = (float) vacc3x1;
float vfpacc3x2 = (float) vacc3x2;
float vfpacc3x3 = (float) vacc3x3;
const float vscale = params->fp32_scalar_fmagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc0x2 *= vscale;
vfpacc0x3 *= vscale;
vfpacc1x0 *= vscale;
vfpacc1x1 *= vscale;
vfpacc1x2 *= vscale;
vfpacc1x3 *= vscale;
vfpacc2x0 *= vscale;
vfpacc2x1 *= vscale;
vfpacc2x2 *= vscale;
vfpacc2x3 *= vscale;
vfpacc3x0 *= vscale;
vfpacc3x1 *= vscale;
vfpacc3x2 *= vscale;
vfpacc3x3 *= vscale;
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
vfpacc0x3 = math_max_f32(vfpacc0x3, voutput_min_less_zero_point);
vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
vfpacc1x1 = math_max_f32(vfpacc1x1, voutput_min_less_zero_point);
vfpacc1x2 = math_max_f32(vfpacc1x2, voutput_min_less_zero_point);
vfpacc1x3 = math_max_f32(vfpacc1x3, voutput_min_less_zero_point);
vfpacc2x0 = math_max_f32(vfpacc2x0, voutput_min_less_zero_point);
vfpacc2x1 = math_max_f32(vfpacc2x1, voutput_min_less_zero_point);
vfpacc2x2 = math_max_f32(vfpacc2x2, voutput_min_less_zero_point);
vfpacc2x3 = math_max_f32(vfpacc2x3, voutput_min_less_zero_point);
vfpacc3x0 = math_max_f32(vfpacc3x0, voutput_min_less_zero_point);
vfpacc3x1 = math_max_f32(vfpacc3x1, voutput_min_less_zero_point);
vfpacc3x2 = math_max_f32(vfpacc3x2, voutput_min_less_zero_point);
vfpacc3x3 = math_max_f32(vfpacc3x3, voutput_min_less_zero_point);
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
vfpacc0x3 = math_min_f32(vfpacc0x3, voutput_max_less_zero_point);
vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
vfpacc1x1 = math_min_f32(vfpacc1x1, voutput_max_less_zero_point);
vfpacc1x2 = math_min_f32(vfpacc1x2, voutput_max_less_zero_point);
vfpacc1x3 = math_min_f32(vfpacc1x3, voutput_max_less_zero_point);
vfpacc2x0 = math_min_f32(vfpacc2x0, voutput_max_less_zero_point);
vfpacc2x1 = math_min_f32(vfpacc2x1, voutput_max_less_zero_point);
vfpacc2x2 = math_min_f32(vfpacc2x2, voutput_max_less_zero_point);
vfpacc2x3 = math_min_f32(vfpacc2x3, voutput_max_less_zero_point);
vfpacc3x0 = math_min_f32(vfpacc3x0, voutput_max_less_zero_point);
vfpacc3x1 = math_min_f32(vfpacc3x1, voutput_max_less_zero_point);
vfpacc3x2 = math_min_f32(vfpacc3x2, voutput_max_less_zero_point);
vfpacc3x3 = math_min_f32(vfpacc3x3, voutput_max_less_zero_point);
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc0x2 += vmagic_bias;
vfpacc0x3 += vmagic_bias;
vfpacc1x0 += vmagic_bias;
vfpacc1x1 += vmagic_bias;
vfpacc1x2 += vmagic_bias;
vfpacc1x3 += vmagic_bias;
vfpacc2x0 += vmagic_bias;
vfpacc2x1 += vmagic_bias;
vfpacc2x2 += vmagic_bias;
vfpacc2x3 += vmagic_bias;
vfpacc3x0 += vmagic_bias;
vfpacc3x1 += vmagic_bias;
vfpacc3x2 += vmagic_bias;
vfpacc3x3 += vmagic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout0x2 = (int32_t) float_as_uint32(vfpacc0x2) - vmagic_bias_less_output_zero_point;
int32_t vout0x3 = (int32_t) float_as_uint32(vfpacc0x3) - vmagic_bias_less_output_zero_point;
int32_t vout1x0 = (int32_t) float_as_uint32(vfpacc1x0) - vmagic_bias_less_output_zero_point;
int32_t vout1x1 = (int32_t) float_as_uint32(vfpacc1x1) - vmagic_bias_less_output_zero_point;
int32_t vout1x2 = (int32_t) float_as_uint32(vfpacc1x2) - vmagic_bias_less_output_zero_point;
int32_t vout1x3 = (int32_t) float_as_uint32(vfpacc1x3) - vmagic_bias_less_output_zero_point;
int32_t vout2x0 = (int32_t) float_as_uint32(vfpacc2x0) - vmagic_bias_less_output_zero_point;
int32_t vout2x1 = (int32_t) float_as_uint32(vfpacc2x1) - vmagic_bias_less_output_zero_point;
int32_t vout2x2 = (int32_t) float_as_uint32(vfpacc2x2) - vmagic_bias_less_output_zero_point;
int32_t vout2x3 = (int32_t) float_as_uint32(vfpacc2x3) - vmagic_bias_less_output_zero_point;
int32_t vout3x0 = (int32_t) float_as_uint32(vfpacc3x0) - vmagic_bias_less_output_zero_point;
int32_t vout3x1 = (int32_t) float_as_uint32(vfpacc3x1) - vmagic_bias_less_output_zero_point;
int32_t vout3x2 = (int32_t) float_as_uint32(vfpacc3x2) - vmagic_bias_less_output_zero_point;
int32_t vout3x3 = (int32_t) float_as_uint32(vfpacc3x3) - vmagic_bias_less_output_zero_point;
if XNN_LIKELY(nc >= 4) {
c0[0] = (int8_t) vout0x0;
c0[1] = (int8_t) vout0x1;
c0[2] = (int8_t) vout0x2;
c0[3] = (int8_t) vout0x3;
c1[0] = (int8_t) vout1x0;
c1[1] = (int8_t) vout1x1;
c1[2] = (int8_t) vout1x2;
c1[3] = (int8_t) vout1x3;
c2[0] = (int8_t) vout2x0;
c2[1] = (int8_t) vout2x1;
c2[2] = (int8_t) vout2x2;
c2[3] = (int8_t) vout2x3;
c3[0] = (int8_t) vout3x0;
c3[1] = (int8_t) vout3x1;
c3[2] = (int8_t) vout3x2;
c3[3] = (int8_t) vout3x3;
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
a2 = (const int8_t*) ((uintptr_t) a2 - kc);
a3 = (const int8_t*) ((uintptr_t) a3 - kc);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
nc -= 4;
} else {
if (nc & 2) {
c0[0] = (int8_t) vout0x0;
c0[1] = (int8_t) vout0x1;
vout0x0 = vout0x2;
c0 += 2;
c1[0] = (int8_t) vout1x0;
c1[1] = (int8_t) vout1x1;
vout1x0 = vout1x2;
c1 += 2;
c2[0] = (int8_t) vout2x0;
c2[1] = (int8_t) vout2x1;
vout2x0 = vout2x2;
c2 += 2;
c3[0] = (int8_t) vout3x0;
c3[1] = (int8_t) vout3x1;
vout3x0 = vout3x2;
c3 += 2;
}
if (nc & 1) {
c0[0] = (int8_t) vout0x0;
c1[0] = (int8_t) vout1x0;
c2[0] = (int8_t) vout2x0;
c3[0] = (int8_t) vout3x0;
}
nc = 0;
}
} while (nc != 0);
}
| 10,338
| 37.011029
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x4-minmax-fp32-scalar-imagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_imagic(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
const int8_t* a0 = a;
int8_t* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
int32_t vacc0x2 = ((const int32_t*) w)[2];
int32_t vacc0x3 = ((const int32_t*) w)[3];
int32_t vacc1x0 = vacc0x0;
int32_t vacc1x1 = vacc0x1;
int32_t vacc1x2 = vacc0x2;
int32_t vacc1x3 = vacc0x3;
int32_t vacc2x0 = vacc0x0;
int32_t vacc2x1 = vacc0x1;
int32_t vacc2x2 = vacc0x2;
int32_t vacc2x3 = vacc0x3;
int32_t vacc3x0 = vacc0x0;
int32_t vacc3x1 = vacc0x1;
int32_t vacc3x2 = vacc0x2;
int32_t vacc3x3 = vacc0x3;
w = (const int32_t*) w + 4;
size_t k = kc;
do {
const int32_t va0 = (int32_t) *a0++;
const int32_t va1 = (int32_t) *a1++;
const int32_t va2 = (int32_t) *a2++;
const int32_t va3 = (int32_t) *a3++;
const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
w = (const int8_t*) w + 4;
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
vacc0x2 += va0 * vb2;
vacc0x3 += va0 * vb3;
vacc1x0 += va1 * vb0;
vacc1x1 += va1 * vb1;
vacc1x2 += va1 * vb2;
vacc1x3 += va1 * vb3;
vacc2x0 += va2 * vb0;
vacc2x1 += va2 * vb1;
vacc2x2 += va2 * vb2;
vacc2x3 += va2 * vb3;
vacc3x0 += va3 * vb0;
vacc3x1 += va3 * vb1;
vacc3x2 += va3 * vb2;
vacc3x3 += va3 * vb3;
k -= sizeof(int8_t);
} while (k != 0);
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
float vfpacc0x2 = (float) vacc0x2;
float vfpacc0x3 = (float) vacc0x3;
float vfpacc1x0 = (float) vacc1x0;
float vfpacc1x1 = (float) vacc1x1;
float vfpacc1x2 = (float) vacc1x2;
float vfpacc1x3 = (float) vacc1x3;
float vfpacc2x0 = (float) vacc2x0;
float vfpacc2x1 = (float) vacc2x1;
float vfpacc2x2 = (float) vacc2x2;
float vfpacc2x3 = (float) vacc2x3;
float vfpacc3x0 = (float) vacc3x0;
float vfpacc3x1 = (float) vacc3x1;
float vfpacc3x2 = (float) vacc3x2;
float vfpacc3x3 = (float) vacc3x3;
const float vscale = params->fp32_scalar_imagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc0x2 *= vscale;
vfpacc0x3 *= vscale;
vfpacc1x0 *= vscale;
vfpacc1x1 *= vscale;
vfpacc1x2 *= vscale;
vfpacc1x3 *= vscale;
vfpacc2x0 *= vscale;
vfpacc2x1 *= vscale;
vfpacc2x2 *= vscale;
vfpacc2x3 *= vscale;
vfpacc3x0 *= vscale;
vfpacc3x1 *= vscale;
vfpacc3x2 *= vscale;
vfpacc3x3 *= vscale;
const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc0x2 += vmagic_bias;
vfpacc0x3 += vmagic_bias;
vfpacc1x0 += vmagic_bias;
vfpacc1x1 += vmagic_bias;
vfpacc1x2 += vmagic_bias;
vfpacc1x3 += vmagic_bias;
vfpacc2x0 += vmagic_bias;
vfpacc2x1 += vmagic_bias;
vfpacc2x2 += vmagic_bias;
vfpacc2x3 += vmagic_bias;
vfpacc3x0 += vmagic_bias;
vfpacc3x1 += vmagic_bias;
vfpacc3x2 += vmagic_bias;
vfpacc3x3 += vmagic_bias;
int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0);
int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1);
int32_t vout0x2 = (int32_t) float_as_uint32(vfpacc0x2);
int32_t vout0x3 = (int32_t) float_as_uint32(vfpacc0x3);
int32_t vout1x0 = (int32_t) float_as_uint32(vfpacc1x0);
int32_t vout1x1 = (int32_t) float_as_uint32(vfpacc1x1);
int32_t vout1x2 = (int32_t) float_as_uint32(vfpacc1x2);
int32_t vout1x3 = (int32_t) float_as_uint32(vfpacc1x3);
int32_t vout2x0 = (int32_t) float_as_uint32(vfpacc2x0);
int32_t vout2x1 = (int32_t) float_as_uint32(vfpacc2x1);
int32_t vout2x2 = (int32_t) float_as_uint32(vfpacc2x2);
int32_t vout2x3 = (int32_t) float_as_uint32(vfpacc2x3);
int32_t vout3x0 = (int32_t) float_as_uint32(vfpacc3x0);
int32_t vout3x1 = (int32_t) float_as_uint32(vfpacc3x1);
int32_t vout3x2 = (int32_t) float_as_uint32(vfpacc3x2);
int32_t vout3x3 = (int32_t) float_as_uint32(vfpacc3x3);
const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
vout0x0 = math_max_s32(vout0x0, vmagic_min);
vout0x1 = math_max_s32(vout0x1, vmagic_min);
vout0x2 = math_max_s32(vout0x2, vmagic_min);
vout0x3 = math_max_s32(vout0x3, vmagic_min);
vout1x0 = math_max_s32(vout1x0, vmagic_min);
vout1x1 = math_max_s32(vout1x1, vmagic_min);
vout1x2 = math_max_s32(vout1x2, vmagic_min);
vout1x3 = math_max_s32(vout1x3, vmagic_min);
vout2x0 = math_max_s32(vout2x0, vmagic_min);
vout2x1 = math_max_s32(vout2x1, vmagic_min);
vout2x2 = math_max_s32(vout2x2, vmagic_min);
vout2x3 = math_max_s32(vout2x3, vmagic_min);
vout3x0 = math_max_s32(vout3x0, vmagic_min);
vout3x1 = math_max_s32(vout3x1, vmagic_min);
vout3x2 = math_max_s32(vout3x2, vmagic_min);
vout3x3 = math_max_s32(vout3x3, vmagic_min);
const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
vout0x0 = math_min_s32(vout0x0, vmagic_max);
vout0x1 = math_min_s32(vout0x1, vmagic_max);
vout0x2 = math_min_s32(vout0x2, vmagic_max);
vout0x3 = math_min_s32(vout0x3, vmagic_max);
vout1x0 = math_min_s32(vout1x0, vmagic_max);
vout1x1 = math_min_s32(vout1x1, vmagic_max);
vout1x2 = math_min_s32(vout1x2, vmagic_max);
vout1x3 = math_min_s32(vout1x3, vmagic_max);
vout2x0 = math_min_s32(vout2x0, vmagic_max);
vout2x1 = math_min_s32(vout2x1, vmagic_max);
vout2x2 = math_min_s32(vout2x2, vmagic_max);
vout2x3 = math_min_s32(vout2x3, vmagic_max);
vout3x0 = math_min_s32(vout3x0, vmagic_max);
vout3x1 = math_min_s32(vout3x1, vmagic_max);
vout3x2 = math_min_s32(vout3x2, vmagic_max);
vout3x3 = math_min_s32(vout3x3, vmagic_max);
const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
vout0x0 -= vmagic_bias_less_zero_point;
vout0x1 -= vmagic_bias_less_zero_point;
vout0x2 -= vmagic_bias_less_zero_point;
vout0x3 -= vmagic_bias_less_zero_point;
vout1x0 -= vmagic_bias_less_zero_point;
vout1x1 -= vmagic_bias_less_zero_point;
vout1x2 -= vmagic_bias_less_zero_point;
vout1x3 -= vmagic_bias_less_zero_point;
vout2x0 -= vmagic_bias_less_zero_point;
vout2x1 -= vmagic_bias_less_zero_point;
vout2x2 -= vmagic_bias_less_zero_point;
vout2x3 -= vmagic_bias_less_zero_point;
vout3x0 -= vmagic_bias_less_zero_point;
vout3x1 -= vmagic_bias_less_zero_point;
vout3x2 -= vmagic_bias_less_zero_point;
vout3x3 -= vmagic_bias_less_zero_point;
if XNN_LIKELY(nc >= 4) {
c0[0] = (int8_t) vout0x0;
c0[1] = (int8_t) vout0x1;
c0[2] = (int8_t) vout0x2;
c0[3] = (int8_t) vout0x3;
c1[0] = (int8_t) vout1x0;
c1[1] = (int8_t) vout1x1;
c1[2] = (int8_t) vout1x2;
c1[3] = (int8_t) vout1x3;
c2[0] = (int8_t) vout2x0;
c2[1] = (int8_t) vout2x1;
c2[2] = (int8_t) vout2x2;
c2[3] = (int8_t) vout2x3;
c3[0] = (int8_t) vout3x0;
c3[1] = (int8_t) vout3x1;
c3[2] = (int8_t) vout3x2;
c3[3] = (int8_t) vout3x3;
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
a2 = (const int8_t*) ((uintptr_t) a2 - kc);
a3 = (const int8_t*) ((uintptr_t) a3 - kc);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
nc -= 4;
} else {
if (nc & 2) {
c0[0] = (int8_t) vout0x0;
c0[1] = (int8_t) vout0x1;
vout0x0 = vout0x2;
c0 += 2;
c1[0] = (int8_t) vout1x0;
c1[1] = (int8_t) vout1x1;
vout1x0 = vout1x2;
c1 += 2;
c2[0] = (int8_t) vout2x0;
c2[1] = (int8_t) vout2x1;
vout2x0 = vout2x2;
c2 += 2;
c3[0] = (int8_t) vout3x0;
c3[1] = (int8_t) vout3x1;
vout3x0 = vout3x2;
c3 += 2;
}
if (nc & 1) {
c0[0] = (int8_t) vout0x0;
c1[0] = (int8_t) vout1x0;
c2[0] = (int8_t) vout2x0;
c3[0] = (int8_t) vout3x0;
}
nc = 0;
}
} while (nc != 0);
}
| 9,701
| 32.570934
| 102
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x4-minmax-fp32-scalar-lrintf.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
const int8_t* a0 = a;
int8_t* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
int32_t vacc0x2 = ((const int32_t*) w)[2];
int32_t vacc0x3 = ((const int32_t*) w)[3];
int32_t vacc1x0 = vacc0x0;
int32_t vacc1x1 = vacc0x1;
int32_t vacc1x2 = vacc0x2;
int32_t vacc1x3 = vacc0x3;
int32_t vacc2x0 = vacc0x0;
int32_t vacc2x1 = vacc0x1;
int32_t vacc2x2 = vacc0x2;
int32_t vacc2x3 = vacc0x3;
int32_t vacc3x0 = vacc0x0;
int32_t vacc3x1 = vacc0x1;
int32_t vacc3x2 = vacc0x2;
int32_t vacc3x3 = vacc0x3;
w = (const int32_t*) w + 4;
size_t k = kc;
do {
const int32_t va0 = (int32_t) *a0++;
const int32_t va1 = (int32_t) *a1++;
const int32_t va2 = (int32_t) *a2++;
const int32_t va3 = (int32_t) *a3++;
const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
w = (const int8_t*) w + 4;
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
vacc0x2 += va0 * vb2;
vacc0x3 += va0 * vb3;
vacc1x0 += va1 * vb0;
vacc1x1 += va1 * vb1;
vacc1x2 += va1 * vb2;
vacc1x3 += va1 * vb3;
vacc2x0 += va2 * vb0;
vacc2x1 += va2 * vb1;
vacc2x2 += va2 * vb2;
vacc2x3 += va2 * vb3;
vacc3x0 += va3 * vb0;
vacc3x1 += va3 * vb1;
vacc3x2 += va3 * vb2;
vacc3x3 += va3 * vb3;
k -= sizeof(int8_t);
} while (k != 0);
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
float vfpacc0x2 = (float) vacc0x2;
float vfpacc0x3 = (float) vacc0x3;
float vfpacc1x0 = (float) vacc1x0;
float vfpacc1x1 = (float) vacc1x1;
float vfpacc1x2 = (float) vacc1x2;
float vfpacc1x3 = (float) vacc1x3;
float vfpacc2x0 = (float) vacc2x0;
float vfpacc2x1 = (float) vacc2x1;
float vfpacc2x2 = (float) vacc2x2;
float vfpacc2x3 = (float) vacc2x3;
float vfpacc3x0 = (float) vacc3x0;
float vfpacc3x1 = (float) vacc3x1;
float vfpacc3x2 = (float) vacc3x2;
float vfpacc3x3 = (float) vacc3x3;
const float vscale = params->fp32_scalar_lrintf.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc0x2 *= vscale;
vfpacc0x3 *= vscale;
vfpacc1x0 *= vscale;
vfpacc1x1 *= vscale;
vfpacc1x2 *= vscale;
vfpacc1x3 *= vscale;
vfpacc2x0 *= vscale;
vfpacc2x1 *= vscale;
vfpacc2x2 *= vscale;
vfpacc2x3 *= vscale;
vfpacc3x0 *= vscale;
vfpacc3x1 *= vscale;
vfpacc3x2 *= vscale;
vfpacc3x3 *= vscale;
const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
vfpacc0x3 = math_max_f32(vfpacc0x3, voutput_min_less_zero_point);
vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
vfpacc1x1 = math_max_f32(vfpacc1x1, voutput_min_less_zero_point);
vfpacc1x2 = math_max_f32(vfpacc1x2, voutput_min_less_zero_point);
vfpacc1x3 = math_max_f32(vfpacc1x3, voutput_min_less_zero_point);
vfpacc2x0 = math_max_f32(vfpacc2x0, voutput_min_less_zero_point);
vfpacc2x1 = math_max_f32(vfpacc2x1, voutput_min_less_zero_point);
vfpacc2x2 = math_max_f32(vfpacc2x2, voutput_min_less_zero_point);
vfpacc2x3 = math_max_f32(vfpacc2x3, voutput_min_less_zero_point);
vfpacc3x0 = math_max_f32(vfpacc3x0, voutput_min_less_zero_point);
vfpacc3x1 = math_max_f32(vfpacc3x1, voutput_min_less_zero_point);
vfpacc3x2 = math_max_f32(vfpacc3x2, voutput_min_less_zero_point);
vfpacc3x3 = math_max_f32(vfpacc3x3, voutput_min_less_zero_point);
const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
vfpacc0x3 = math_min_f32(vfpacc0x3, voutput_max_less_zero_point);
vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
vfpacc1x1 = math_min_f32(vfpacc1x1, voutput_max_less_zero_point);
vfpacc1x2 = math_min_f32(vfpacc1x2, voutput_max_less_zero_point);
vfpacc1x3 = math_min_f32(vfpacc1x3, voutput_max_less_zero_point);
vfpacc2x0 = math_min_f32(vfpacc2x0, voutput_max_less_zero_point);
vfpacc2x1 = math_min_f32(vfpacc2x1, voutput_max_less_zero_point);
vfpacc2x2 = math_min_f32(vfpacc2x2, voutput_max_less_zero_point);
vfpacc2x3 = math_min_f32(vfpacc2x3, voutput_max_less_zero_point);
vfpacc3x0 = math_min_f32(vfpacc3x0, voutput_max_less_zero_point);
vfpacc3x1 = math_min_f32(vfpacc3x1, voutput_max_less_zero_point);
vfpacc3x2 = math_min_f32(vfpacc3x2, voutput_max_less_zero_point);
vfpacc3x3 = math_min_f32(vfpacc3x3, voutput_max_less_zero_point);
const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
const int32_t vrndacc0x2 = (int32_t) lrintf(vfpacc0x2);
const int32_t vrndacc0x3 = (int32_t) lrintf(vfpacc0x3);
const int32_t vrndacc1x0 = (int32_t) lrintf(vfpacc1x0);
const int32_t vrndacc1x1 = (int32_t) lrintf(vfpacc1x1);
const int32_t vrndacc1x2 = (int32_t) lrintf(vfpacc1x2);
const int32_t vrndacc1x3 = (int32_t) lrintf(vfpacc1x3);
const int32_t vrndacc2x0 = (int32_t) lrintf(vfpacc2x0);
const int32_t vrndacc2x1 = (int32_t) lrintf(vfpacc2x1);
const int32_t vrndacc2x2 = (int32_t) lrintf(vfpacc2x2);
const int32_t vrndacc2x3 = (int32_t) lrintf(vfpacc2x3);
const int32_t vrndacc3x0 = (int32_t) lrintf(vfpacc3x0);
const int32_t vrndacc3x1 = (int32_t) lrintf(vfpacc3x1);
const int32_t vrndacc3x2 = (int32_t) lrintf(vfpacc3x2);
const int32_t vrndacc3x3 = (int32_t) lrintf(vfpacc3x3);
const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
int32_t vout0x2 = vrndacc0x2 + voutput_zero_point;
int32_t vout0x3 = vrndacc0x3 + voutput_zero_point;
int32_t vout1x0 = vrndacc1x0 + voutput_zero_point;
int32_t vout1x1 = vrndacc1x1 + voutput_zero_point;
int32_t vout1x2 = vrndacc1x2 + voutput_zero_point;
int32_t vout1x3 = vrndacc1x3 + voutput_zero_point;
int32_t vout2x0 = vrndacc2x0 + voutput_zero_point;
int32_t vout2x1 = vrndacc2x1 + voutput_zero_point;
int32_t vout2x2 = vrndacc2x2 + voutput_zero_point;
int32_t vout2x3 = vrndacc2x3 + voutput_zero_point;
int32_t vout3x0 = vrndacc3x0 + voutput_zero_point;
int32_t vout3x1 = vrndacc3x1 + voutput_zero_point;
int32_t vout3x2 = vrndacc3x2 + voutput_zero_point;
int32_t vout3x3 = vrndacc3x3 + voutput_zero_point;
if XNN_LIKELY(nc >= 4) {
c0[0] = (int8_t) vout0x0;
c0[1] = (int8_t) vout0x1;
c0[2] = (int8_t) vout0x2;
c0[3] = (int8_t) vout0x3;
c1[0] = (int8_t) vout1x0;
c1[1] = (int8_t) vout1x1;
c1[2] = (int8_t) vout1x2;
c1[3] = (int8_t) vout1x3;
c2[0] = (int8_t) vout2x0;
c2[1] = (int8_t) vout2x1;
c2[2] = (int8_t) vout2x2;
c2[3] = (int8_t) vout2x3;
c3[0] = (int8_t) vout3x0;
c3[1] = (int8_t) vout3x1;
c3[2] = (int8_t) vout3x2;
c3[3] = (int8_t) vout3x3;
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
a2 = (const int8_t*) ((uintptr_t) a2 - kc);
a3 = (const int8_t*) ((uintptr_t) a3 - kc);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
nc -= 4;
} else {
if (nc & 2) {
c0[0] = (int8_t) vout0x0;
c0[1] = (int8_t) vout0x1;
vout0x0 = vout0x2;
c0 += 2;
c1[0] = (int8_t) vout1x0;
c1[1] = (int8_t) vout1x1;
vout1x0 = vout1x2;
c1 += 2;
c2[0] = (int8_t) vout2x0;
c2[1] = (int8_t) vout2x1;
vout2x0 = vout2x2;
c2 += 2;
c3[0] = (int8_t) vout3x0;
c3[1] = (int8_t) vout3x1;
vout3x0 = vout3x2;
c3 += 2;
}
if (nc & 1) {
c0[0] = (int8_t) vout0x0;
c1[0] = (int8_t) vout1x0;
c2[0] = (int8_t) vout2x0;
c3[0] = (int8_t) vout3x0;
}
nc = 0;
}
} while (nc != 0);
}
| 10,063
| 36
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x4-minmax-fp32-wasm-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_4x4__wasm_fmagic(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
const int8_t* a0 = a;
int8_t* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
int32_t vacc0x2 = ((const int32_t*) w)[2];
int32_t vacc0x3 = ((const int32_t*) w)[3];
int32_t vacc1x0 = vacc0x0;
int32_t vacc1x1 = vacc0x1;
int32_t vacc1x2 = vacc0x2;
int32_t vacc1x3 = vacc0x3;
int32_t vacc2x0 = vacc0x0;
int32_t vacc2x1 = vacc0x1;
int32_t vacc2x2 = vacc0x2;
int32_t vacc2x3 = vacc0x3;
int32_t vacc3x0 = vacc0x0;
int32_t vacc3x1 = vacc0x1;
int32_t vacc3x2 = vacc0x2;
int32_t vacc3x3 = vacc0x3;
w = (const int32_t*) w + 4;
size_t k = kc;
do {
const int32_t va0 = (int32_t) *a0++;
const int32_t va1 = (int32_t) *a1++;
const int32_t va2 = (int32_t) *a2++;
const int32_t va3 = (int32_t) *a3++;
const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
w = (const int8_t*) w + 4;
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
vacc0x2 += va0 * vb2;
vacc0x3 += va0 * vb3;
vacc1x0 += va1 * vb0;
vacc1x1 += va1 * vb1;
vacc1x2 += va1 * vb2;
vacc1x3 += va1 * vb3;
vacc2x0 += va2 * vb0;
vacc2x1 += va2 * vb1;
vacc2x2 += va2 * vb2;
vacc2x3 += va2 * vb3;
vacc3x0 += va3 * vb0;
vacc3x1 += va3 * vb1;
vacc3x2 += va3 * vb2;
vacc3x3 += va3 * vb3;
k -= sizeof(int8_t);
} while (k != 0);
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
float vfpacc0x2 = (float) vacc0x2;
float vfpacc0x3 = (float) vacc0x3;
float vfpacc1x0 = (float) vacc1x0;
float vfpacc1x1 = (float) vacc1x1;
float vfpacc1x2 = (float) vacc1x2;
float vfpacc1x3 = (float) vacc1x3;
float vfpacc2x0 = (float) vacc2x0;
float vfpacc2x1 = (float) vacc2x1;
float vfpacc2x2 = (float) vacc2x2;
float vfpacc2x3 = (float) vacc2x3;
float vfpacc3x0 = (float) vacc3x0;
float vfpacc3x1 = (float) vacc3x1;
float vfpacc3x2 = (float) vacc3x2;
float vfpacc3x3 = (float) vacc3x3;
const float vscale = params->fp32_scalar_fmagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc0x2 *= vscale;
vfpacc0x3 *= vscale;
vfpacc1x0 *= vscale;
vfpacc1x1 *= vscale;
vfpacc1x2 *= vscale;
vfpacc1x3 *= vscale;
vfpacc2x0 *= vscale;
vfpacc2x1 *= vscale;
vfpacc2x2 *= vscale;
vfpacc2x3 *= vscale;
vfpacc3x0 *= vscale;
vfpacc3x1 *= vscale;
vfpacc3x2 *= vscale;
vfpacc3x3 *= vscale;
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = __builtin_wasm_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = __builtin_wasm_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = __builtin_wasm_max_f32(vfpacc0x2, voutput_min_less_zero_point);
vfpacc0x3 = __builtin_wasm_max_f32(vfpacc0x3, voutput_min_less_zero_point);
vfpacc1x0 = __builtin_wasm_max_f32(vfpacc1x0, voutput_min_less_zero_point);
vfpacc1x1 = __builtin_wasm_max_f32(vfpacc1x1, voutput_min_less_zero_point);
vfpacc1x2 = __builtin_wasm_max_f32(vfpacc1x2, voutput_min_less_zero_point);
vfpacc1x3 = __builtin_wasm_max_f32(vfpacc1x3, voutput_min_less_zero_point);
vfpacc2x0 = __builtin_wasm_max_f32(vfpacc2x0, voutput_min_less_zero_point);
vfpacc2x1 = __builtin_wasm_max_f32(vfpacc2x1, voutput_min_less_zero_point);
vfpacc2x2 = __builtin_wasm_max_f32(vfpacc2x2, voutput_min_less_zero_point);
vfpacc2x3 = __builtin_wasm_max_f32(vfpacc2x3, voutput_min_less_zero_point);
vfpacc3x0 = __builtin_wasm_max_f32(vfpacc3x0, voutput_min_less_zero_point);
vfpacc3x1 = __builtin_wasm_max_f32(vfpacc3x1, voutput_min_less_zero_point);
vfpacc3x2 = __builtin_wasm_max_f32(vfpacc3x2, voutput_min_less_zero_point);
vfpacc3x3 = __builtin_wasm_max_f32(vfpacc3x3, voutput_min_less_zero_point);
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = __builtin_wasm_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = __builtin_wasm_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = __builtin_wasm_min_f32(vfpacc0x2, voutput_max_less_zero_point);
vfpacc0x3 = __builtin_wasm_min_f32(vfpacc0x3, voutput_max_less_zero_point);
vfpacc1x0 = __builtin_wasm_min_f32(vfpacc1x0, voutput_max_less_zero_point);
vfpacc1x1 = __builtin_wasm_min_f32(vfpacc1x1, voutput_max_less_zero_point);
vfpacc1x2 = __builtin_wasm_min_f32(vfpacc1x2, voutput_max_less_zero_point);
vfpacc1x3 = __builtin_wasm_min_f32(vfpacc1x3, voutput_max_less_zero_point);
vfpacc2x0 = __builtin_wasm_min_f32(vfpacc2x0, voutput_max_less_zero_point);
vfpacc2x1 = __builtin_wasm_min_f32(vfpacc2x1, voutput_max_less_zero_point);
vfpacc2x2 = __builtin_wasm_min_f32(vfpacc2x2, voutput_max_less_zero_point);
vfpacc2x3 = __builtin_wasm_min_f32(vfpacc2x3, voutput_max_less_zero_point);
vfpacc3x0 = __builtin_wasm_min_f32(vfpacc3x0, voutput_max_less_zero_point);
vfpacc3x1 = __builtin_wasm_min_f32(vfpacc3x1, voutput_max_less_zero_point);
vfpacc3x2 = __builtin_wasm_min_f32(vfpacc3x2, voutput_max_less_zero_point);
vfpacc3x3 = __builtin_wasm_min_f32(vfpacc3x3, voutput_max_less_zero_point);
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc0x2 += vmagic_bias;
vfpacc0x3 += vmagic_bias;
vfpacc1x0 += vmagic_bias;
vfpacc1x1 += vmagic_bias;
vfpacc1x2 += vmagic_bias;
vfpacc1x3 += vmagic_bias;
vfpacc2x0 += vmagic_bias;
vfpacc2x1 += vmagic_bias;
vfpacc2x2 += vmagic_bias;
vfpacc2x3 += vmagic_bias;
vfpacc3x0 += vmagic_bias;
vfpacc3x1 += vmagic_bias;
vfpacc3x2 += vmagic_bias;
vfpacc3x3 += vmagic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout0x2 = (int32_t) float_as_uint32(vfpacc0x2) - vmagic_bias_less_output_zero_point;
int32_t vout0x3 = (int32_t) float_as_uint32(vfpacc0x3) - vmagic_bias_less_output_zero_point;
int32_t vout1x0 = (int32_t) float_as_uint32(vfpacc1x0) - vmagic_bias_less_output_zero_point;
int32_t vout1x1 = (int32_t) float_as_uint32(vfpacc1x1) - vmagic_bias_less_output_zero_point;
int32_t vout1x2 = (int32_t) float_as_uint32(vfpacc1x2) - vmagic_bias_less_output_zero_point;
int32_t vout1x3 = (int32_t) float_as_uint32(vfpacc1x3) - vmagic_bias_less_output_zero_point;
int32_t vout2x0 = (int32_t) float_as_uint32(vfpacc2x0) - vmagic_bias_less_output_zero_point;
int32_t vout2x1 = (int32_t) float_as_uint32(vfpacc2x1) - vmagic_bias_less_output_zero_point;
int32_t vout2x2 = (int32_t) float_as_uint32(vfpacc2x2) - vmagic_bias_less_output_zero_point;
int32_t vout2x3 = (int32_t) float_as_uint32(vfpacc2x3) - vmagic_bias_less_output_zero_point;
int32_t vout3x0 = (int32_t) float_as_uint32(vfpacc3x0) - vmagic_bias_less_output_zero_point;
int32_t vout3x1 = (int32_t) float_as_uint32(vfpacc3x1) - vmagic_bias_less_output_zero_point;
int32_t vout3x2 = (int32_t) float_as_uint32(vfpacc3x2) - vmagic_bias_less_output_zero_point;
int32_t vout3x3 = (int32_t) float_as_uint32(vfpacc3x3) - vmagic_bias_less_output_zero_point;
if XNN_LIKELY(nc >= 4) {
c0[0] = (int8_t) vout0x0;
c0[1] = (int8_t) vout0x1;
c0[2] = (int8_t) vout0x2;
c0[3] = (int8_t) vout0x3;
c1[0] = (int8_t) vout1x0;
c1[1] = (int8_t) vout1x1;
c1[2] = (int8_t) vout1x2;
c1[3] = (int8_t) vout1x3;
c2[0] = (int8_t) vout2x0;
c2[1] = (int8_t) vout2x1;
c2[2] = (int8_t) vout2x2;
c2[3] = (int8_t) vout2x3;
c3[0] = (int8_t) vout3x0;
c3[1] = (int8_t) vout3x1;
c3[2] = (int8_t) vout3x2;
c3[3] = (int8_t) vout3x3;
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
a2 = (const int8_t*) ((uintptr_t) a2 - kc);
a3 = (const int8_t*) ((uintptr_t) a3 - kc);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
nc -= 4;
} else {
if (nc & 2) {
c0[0] = (int8_t) vout0x0;
c0[1] = (int8_t) vout0x1;
vout0x0 = vout0x2;
c0 += 2;
c1[0] = (int8_t) vout1x0;
c1[1] = (int8_t) vout1x1;
vout1x0 = vout1x2;
c1 += 2;
c2[0] = (int8_t) vout2x0;
c2[1] = (int8_t) vout2x1;
vout2x0 = vout2x2;
c2 += 2;
c3[0] = (int8_t) vout3x0;
c3[1] = (int8_t) vout3x1;
vout3x0 = vout3x2;
c3 += 2;
}
if (nc & 1) {
c0[0] = (int8_t) vout0x0;
c1[0] = (int8_t) vout1x0;
c2[0] = (int8_t) vout2x0;
c3[0] = (int8_t) vout3x0;
}
nc = 0;
}
} while (nc != 0);
}
| 10,656
| 38.180147
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x4-minmax-rndnu-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
// qs8 GEMM scalar microkernel: computes a 4x4 tile of C = A*W + bias for
// signed 8-bit operands with 32-bit accumulation, then requantizes each
// accumulator to int8 via the RNDNU scheme (widening 32x32->64 multiply,
// add a rounding term, arithmetic shift right) with min/max clamping and
// output zero-point addition.
//
// mr                    - number of valid rows of A/C (1..4); see aliasing below.
// nc                    - output columns remaining; consumed 4 per iteration.
// kc                    - bytes of K per row of A.
// a, a_stride           - input matrix and its row stride in bytes.
// w                     - packed weights: 4 int32 biases, then int8 weights
//                         grouped 4 columns per K step.
// c, cm_stride, cn_stride - output matrix, its row stride, and the stride
//                         between consecutive 4-column output tiles.
// params                - rndnu_scalar requantization parameters.
void xnn_qs8_gemm_minmax_rndnu_ukernel_4x4__scalar(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);

  const int8_t* a0 = a;
  int8_t* c0 = c;
  // When mr < 4, alias the unused row pointers to the previous row: all four
  // row streams stay valid, and the redundant stores simply rewrite rows that
  // were already written with the same values.
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }

  do {
    // Seed all 16 accumulators from the 4 per-column int32 biases; each row
    // starts from the same bias values.
    int32_t vacc0x0 = ((const int32_t*) w)[0];
    int32_t vacc0x1 = ((const int32_t*) w)[1];
    int32_t vacc0x2 = ((const int32_t*) w)[2];
    int32_t vacc0x3 = ((const int32_t*) w)[3];
    int32_t vacc1x0 = vacc0x0;
    int32_t vacc1x1 = vacc0x1;
    int32_t vacc1x2 = vacc0x2;
    int32_t vacc1x3 = vacc0x3;
    int32_t vacc2x0 = vacc0x0;
    int32_t vacc2x1 = vacc0x1;
    int32_t vacc2x2 = vacc0x2;
    int32_t vacc2x3 = vacc0x3;
    int32_t vacc3x0 = vacc0x0;
    int32_t vacc3x1 = vacc0x1;
    int32_t vacc3x2 = vacc0x2;
    int32_t vacc3x3 = vacc0x3;
    w = (const int32_t*) w + 4;

    // Main K loop: one int8 element per row per iteration, 16 multiply-adds.
    size_t k = kc;
    do {
      const int32_t va0 = (int32_t) *a0++;
      const int32_t va1 = (int32_t) *a1++;
      const int32_t va2 = (int32_t) *a2++;
      const int32_t va3 = (int32_t) *a3++;

      const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
      const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
      const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
      const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
      w = (const int8_t*) w + 4;

      vacc0x0 += va0 * vb0;
      vacc0x1 += va0 * vb1;
      vacc0x2 += va0 * vb2;
      vacc0x3 += va0 * vb3;
      vacc1x0 += va1 * vb0;
      vacc1x1 += va1 * vb1;
      vacc1x2 += va1 * vb2;
      vacc1x3 += va1 * vb3;
      vacc2x0 += va2 * vb0;
      vacc2x1 += va2 * vb1;
      vacc2x2 += va2 * vb2;
      vacc2x3 += va2 * vb3;
      vacc3x0 += va3 * vb0;
      vacc3x1 += va3 * vb1;
      vacc3x2 += va3 * vb2;
      vacc3x3 += va3 * vb3;

      k -= sizeof(int8_t);
    } while (k != 0);

    // RNDNU requantization, step 1: widen each accumulator to 64 bits by
    // multiplying with the fixed-point multiplier and add the rounding term.
    const int32_t vmultiplier = params->rndnu_scalar.multiplier;
    const int64_t vrounding = params->rndnu_scalar.rounding;
    const int64_t vextacc0x0 = math_mulext_s32(vacc0x0, vmultiplier) + vrounding;
    const int64_t vextacc0x1 = math_mulext_s32(vacc0x1, vmultiplier) + vrounding;
    const int64_t vextacc0x2 = math_mulext_s32(vacc0x2, vmultiplier) + vrounding;
    const int64_t vextacc0x3 = math_mulext_s32(vacc0x3, vmultiplier) + vrounding;
    const int64_t vextacc1x0 = math_mulext_s32(vacc1x0, vmultiplier) + vrounding;
    const int64_t vextacc1x1 = math_mulext_s32(vacc1x1, vmultiplier) + vrounding;
    const int64_t vextacc1x2 = math_mulext_s32(vacc1x2, vmultiplier) + vrounding;
    const int64_t vextacc1x3 = math_mulext_s32(vacc1x3, vmultiplier) + vrounding;
    const int64_t vextacc2x0 = math_mulext_s32(vacc2x0, vmultiplier) + vrounding;
    const int64_t vextacc2x1 = math_mulext_s32(vacc2x1, vmultiplier) + vrounding;
    const int64_t vextacc2x2 = math_mulext_s32(vacc2x2, vmultiplier) + vrounding;
    const int64_t vextacc2x3 = math_mulext_s32(vacc2x3, vmultiplier) + vrounding;
    const int64_t vextacc3x0 = math_mulext_s32(vacc3x0, vmultiplier) + vrounding;
    const int64_t vextacc3x1 = math_mulext_s32(vacc3x1, vmultiplier) + vrounding;
    const int64_t vextacc3x2 = math_mulext_s32(vacc3x2, vmultiplier) + vrounding;
    const int64_t vextacc3x3 = math_mulext_s32(vacc3x3, vmultiplier) + vrounding;

    // Step 2: arithmetic right shift back down to a 32-bit result.
    const uint32_t vshift = params->rndnu_scalar.shift;
    int32_t vout0x0 = (int32_t) math_asr_s64(vextacc0x0, vshift);
    int32_t vout0x1 = (int32_t) math_asr_s64(vextacc0x1, vshift);
    int32_t vout0x2 = (int32_t) math_asr_s64(vextacc0x2, vshift);
    int32_t vout0x3 = (int32_t) math_asr_s64(vextacc0x3, vshift);
    int32_t vout1x0 = (int32_t) math_asr_s64(vextacc1x0, vshift);
    int32_t vout1x1 = (int32_t) math_asr_s64(vextacc1x1, vshift);
    int32_t vout1x2 = (int32_t) math_asr_s64(vextacc1x2, vshift);
    int32_t vout1x3 = (int32_t) math_asr_s64(vextacc1x3, vshift);
    int32_t vout2x0 = (int32_t) math_asr_s64(vextacc2x0, vshift);
    int32_t vout2x1 = (int32_t) math_asr_s64(vextacc2x1, vshift);
    int32_t vout2x2 = (int32_t) math_asr_s64(vextacc2x2, vshift);
    int32_t vout2x3 = (int32_t) math_asr_s64(vextacc2x3, vshift);
    int32_t vout3x0 = (int32_t) math_asr_s64(vextacc3x0, vshift);
    int32_t vout3x1 = (int32_t) math_asr_s64(vextacc3x1, vshift);
    int32_t vout3x2 = (int32_t) math_asr_s64(vextacc3x2, vshift);
    int32_t vout3x3 = (int32_t) math_asr_s64(vextacc3x3, vshift);

    // Step 3: clamp to [output_min, output_max], both expressed relative to
    // the output zero point so the clamp happens before the zero-point add.
    const int32_t voutput_min_less_zero_point = params->rndnu_scalar.output_min_less_zero_point;
    vout0x0 = math_max_s32(vout0x0, voutput_min_less_zero_point);
    vout0x1 = math_max_s32(vout0x1, voutput_min_less_zero_point);
    vout0x2 = math_max_s32(vout0x2, voutput_min_less_zero_point);
    vout0x3 = math_max_s32(vout0x3, voutput_min_less_zero_point);
    vout1x0 = math_max_s32(vout1x0, voutput_min_less_zero_point);
    vout1x1 = math_max_s32(vout1x1, voutput_min_less_zero_point);
    vout1x2 = math_max_s32(vout1x2, voutput_min_less_zero_point);
    vout1x3 = math_max_s32(vout1x3, voutput_min_less_zero_point);
    vout2x0 = math_max_s32(vout2x0, voutput_min_less_zero_point);
    vout2x1 = math_max_s32(vout2x1, voutput_min_less_zero_point);
    vout2x2 = math_max_s32(vout2x2, voutput_min_less_zero_point);
    vout2x3 = math_max_s32(vout2x3, voutput_min_less_zero_point);
    vout3x0 = math_max_s32(vout3x0, voutput_min_less_zero_point);
    vout3x1 = math_max_s32(vout3x1, voutput_min_less_zero_point);
    vout3x2 = math_max_s32(vout3x2, voutput_min_less_zero_point);
    vout3x3 = math_max_s32(vout3x3, voutput_min_less_zero_point);

    const int32_t voutput_max_less_zero_point = params->rndnu_scalar.output_max_less_zero_point;
    vout0x0 = math_min_s32(vout0x0, voutput_max_less_zero_point);
    vout0x1 = math_min_s32(vout0x1, voutput_max_less_zero_point);
    vout0x2 = math_min_s32(vout0x2, voutput_max_less_zero_point);
    vout0x3 = math_min_s32(vout0x3, voutput_max_less_zero_point);
    vout1x0 = math_min_s32(vout1x0, voutput_max_less_zero_point);
    vout1x1 = math_min_s32(vout1x1, voutput_max_less_zero_point);
    vout1x2 = math_min_s32(vout1x2, voutput_max_less_zero_point);
    vout1x3 = math_min_s32(vout1x3, voutput_max_less_zero_point);
    vout2x0 = math_min_s32(vout2x0, voutput_max_less_zero_point);
    vout2x1 = math_min_s32(vout2x1, voutput_max_less_zero_point);
    vout2x2 = math_min_s32(vout2x2, voutput_max_less_zero_point);
    vout2x3 = math_min_s32(vout2x3, voutput_max_less_zero_point);
    vout3x0 = math_min_s32(vout3x0, voutput_max_less_zero_point);
    vout3x1 = math_min_s32(vout3x1, voutput_max_less_zero_point);
    vout3x2 = math_min_s32(vout3x2, voutput_max_less_zero_point);
    vout3x3 = math_min_s32(vout3x3, voutput_max_less_zero_point);

    // Step 4: shift into the quantized output domain by adding the zero point.
    const int32_t voutput_zero_point = params->rndnu_scalar.output_zero_point;
    vout0x0 += voutput_zero_point;
    vout0x1 += voutput_zero_point;
    vout0x2 += voutput_zero_point;
    vout0x3 += voutput_zero_point;
    vout1x0 += voutput_zero_point;
    vout1x1 += voutput_zero_point;
    vout1x2 += voutput_zero_point;
    vout1x3 += voutput_zero_point;
    vout2x0 += voutput_zero_point;
    vout2x1 += voutput_zero_point;
    vout2x2 += voutput_zero_point;
    vout2x3 += voutput_zero_point;
    vout3x0 += voutput_zero_point;
    vout3x1 += voutput_zero_point;
    vout3x2 += voutput_zero_point;
    vout3x3 += voutput_zero_point;

    if XNN_LIKELY(nc >= 4) {
      // Full 4-column tile: store, rewind A pointers by kc, advance C pointers
      // to the next column tile.
      c0[0] = (int8_t) vout0x0;
      c0[1] = (int8_t) vout0x1;
      c0[2] = (int8_t) vout0x2;
      c0[3] = (int8_t) vout0x3;
      c1[0] = (int8_t) vout1x0;
      c1[1] = (int8_t) vout1x1;
      c1[2] = (int8_t) vout1x2;
      c1[3] = (int8_t) vout1x3;
      c2[0] = (int8_t) vout2x0;
      c2[1] = (int8_t) vout2x1;
      c2[2] = (int8_t) vout2x2;
      c2[3] = (int8_t) vout2x3;
      c3[0] = (int8_t) vout3x0;
      c3[1] = (int8_t) vout3x1;
      c3[2] = (int8_t) vout3x2;
      c3[3] = (int8_t) vout3x3;

      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
      a3 = (const int8_t*) ((uintptr_t) a3 - kc);

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);

      nc -= 4;
    } else {
      // Partial tile (nc in 1..3): write 2 columns then shift the remaining
      // value into the x0 slot, then optionally write 1 more.
      if (nc & 2) {
        c0[0] = (int8_t) vout0x0;
        c0[1] = (int8_t) vout0x1;
        vout0x0 = vout0x2;
        c0 += 2;
        c1[0] = (int8_t) vout1x0;
        c1[1] = (int8_t) vout1x1;
        vout1x0 = vout1x2;
        c1 += 2;
        c2[0] = (int8_t) vout2x0;
        c2[1] = (int8_t) vout2x1;
        vout2x0 = vout2x2;
        c2 += 2;
        c3[0] = (int8_t) vout3x0;
        c3[1] = (int8_t) vout3x1;
        vout3x0 = vout3x2;
        c3 += 2;
      }
      if (nc & 1) {
        c0[0] = (int8_t) vout0x0;
        c1[0] = (int8_t) vout1x0;
        c2[0] = (int8_t) vout2x0;
        c3[0] = (int8_t) vout3x0;
      }

      nc = 0;
    }
  } while (nc != 0);
}
| 10,083
| 38.390625
| 96
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x4c2-minmax-fp32-avx-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// qs8 GEMM microkernel for AVX (MR=4, NR=4, 2-element K blocking, 128-bit
// weight loads): accumulates a 4x4 int32 tile using _mm_madd_epi16 on
// sign-extended int16 lanes, then requantizes via the fp32 path:
// int32 -> float, multiply by scale, clamp to output max, convert back to
// int32 (round-to-nearest), pack to int16 with saturating zero-point add,
// pack to int8, and clamp to output min.
//
// The XNN_OOB_READS annotation indicates the 64-bit A loads in the K
// remainder may read past the last valid byte of a row.
void xnn_qs8_gemm_minmax_fp32_ukernel_4x4c2__avx_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // Weights are packed in groups of 2 K-elements per column, so round kc up
  // to a multiple of 2 for the pointer rewind at the end of each tile.
  kc = round_up_po2(kc, 2 * sizeof(int8_t));
  const int8_t* a0 = a;
  int8_t* c0 = c;
  // When mr < 4, alias the unused row pointers to the previous row: all four
  // row streams stay valid, and the redundant stores rewrite earlier rows.
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }

  do {
    // Seed each row's 4 accumulators with the packed int32 biases.
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    __m128i vacc1x0123 = vacc0x0123;
    __m128i vacc2x0123 = vacc0x0123;
    __m128i vacc3x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);

    // Main K loop: 8 bytes of A per row, 32 weight bytes (4 K-steps of 2x4)
    // per iteration; each vxbN pairs with one broadcast K-pair of A.
    size_t k = kc;
    while (k >= 8 * sizeof(int8_t)) {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
      a0 += 8;
      const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
      const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
      a1 += 8;
      const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
      const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
      a2 += 8;
      const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
      const __m128i vxa3 = _mm_cvtepi8_epi16(va3);
      a3 += 8;

      // 128-bit load covers two 8-byte weight groups; cvtepi8_epi16 widens
      // only the low half, so the high half is sign-extended via unpackhi+srai.
      const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
      const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
      const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);

      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      vacc1x0123 = _mm_add_epi32(vacc1x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      vacc2x0123 = _mm_add_epi32(vacc2x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      vacc3x0123 = _mm_add_epi32(vacc3x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
      vacc1x0123 = _mm_add_epi32(vacc1x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
      vacc2x0123 = _mm_add_epi32(vacc2x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
      vacc3x0123 = _mm_add_epi32(vacc3x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
      const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16));
      const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
      const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);

      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
      vacc1x0123 = _mm_add_epi32(vacc1x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
      vacc2x0123 = _mm_add_epi32(vacc2x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
      vacc3x0123 = _mm_add_epi32(vacc3x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
      vacc1x0123 = _mm_add_epi32(vacc1x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
      vacc2x0123 = _mm_add_epi32(vacc2x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
      vacc3x0123 = _mm_add_epi32(vacc3x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));

      w = (const void*) ((const int8_t*) w + 32);
      k -= 8 * sizeof(int8_t);
    }
    // K remainder (kc rounded to 2, so k here is 2, 4 or 6): process up to
    // three more K-pairs; the 8-byte A loads may over-read (XNN_OOB_READS).
    if (k != 0) {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
      a0 = (const int8_t*) ((uintptr_t) a0 + k);
      const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
      const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
      a1 = (const int8_t*) ((uintptr_t) a1 + k);
      const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
      const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
      a2 = (const int8_t*) ((uintptr_t) a2 + k);
      const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
      const __m128i vxa3 = _mm_cvtepi8_epi16(va3);
      a3 = (const int8_t*) ((uintptr_t) a3 + k);

      const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
      const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
      w = (const void*) ((const int8_t*) w + 8);

      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      vacc1x0123 = _mm_add_epi32(vacc1x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      vacc2x0123 = _mm_add_epi32(vacc2x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      vacc3x0123 = _mm_add_epi32(vacc3x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));

      if (k > 2 * sizeof(int8_t)) {
        const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
        const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
        w = (const void*) ((const int8_t*) w + 8);

        vacc0x0123 = _mm_add_epi32(vacc0x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
        vacc1x0123 = _mm_add_epi32(vacc1x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
        vacc2x0123 = _mm_add_epi32(vacc2x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
        vacc3x0123 = _mm_add_epi32(vacc3x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));

        if (k > 4 * sizeof(int8_t)) {
          const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
          const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
          w = (const void*) ((const int8_t*) w + 8);

          vacc0x0123 = _mm_add_epi32(vacc0x0123,
            _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
          vacc1x0123 = _mm_add_epi32(vacc1x0123,
            _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
          vacc2x0123 = _mm_add_epi32(vacc2x0123,
            _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
          vacc3x0123 = _mm_add_epi32(vacc3x0123,
            _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
        }
      }
    }

    // fp32 requantization: scale in float, clamp the max in float (the min is
    // enforced after packing), round-convert back to int32.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
    __m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123);

    const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
    vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale);

    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
    vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point);

    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
    vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123);

    // Pack to int16 with a saturating zero-point add, then to int8, then
    // apply the output minimum.
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point);

    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123);

    vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));

    if (nc >= 4) {
      // Full tile: each 32-bit lane of vout holds one row's 4 output bytes.
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
      unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
      unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3));

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);

      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
      a3 = (const int8_t*) ((uintptr_t) a3 - kc);

      nc -= 4;
    } else {
      // Partial tile (nc in 1..3): store 2 bytes per row, shift the register
      // right by 16 bits, then optionally store 1 more byte per row.
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6));
        c3 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c0 = (int8_t) _mm_extract_epi8(vout, 0);
        *c1 = (int8_t) _mm_extract_epi8(vout, 4);
        *c2 = (int8_t) _mm_extract_epi8(vout, 8);
        *c3 = (int8_t) _mm_extract_epi8(vout, 12);
      }

      nc = 0;
    }
  } while (nc != 0);
}
| 10,763
| 40.4
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x4c2-minmax-fp32-avx-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// qs8 GEMM microkernel for AVX (MR=4, NR=4, 2-element K blocking, 64-bit
// weight loads): same computation as the ld128 variant, but each 8-byte
// weight group is loaded with _mm_loadl_epi64 and sign-extended with
// _mm_cvtepi8_epi16 individually instead of two groups per 128-bit load.
// Accumulates a 4x4 int32 tile via _mm_madd_epi16, then requantizes with the
// fp32 path: int32 -> float, scale, clamp max, round back to int32, pack to
// int16 with saturating zero-point add, pack to int8, clamp min.
//
// The XNN_OOB_READS annotation indicates the 64-bit A loads in the K
// remainder may read past the last valid byte of a row.
void xnn_qs8_gemm_minmax_fp32_ukernel_4x4c2__avx_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // Weights are packed 2 K-elements per column; round kc up to a multiple of
  // 2 so the end-of-tile pointer rewind matches the packed layout.
  kc = round_up_po2(kc, 2 * sizeof(int8_t));
  const int8_t* a0 = a;
  int8_t* c0 = c;
  // When mr < 4, alias the unused row pointers to the previous row: all four
  // row streams stay valid, and the redundant stores rewrite earlier rows.
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }

  do {
    // Seed each row's 4 accumulators with the packed int32 biases.
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    __m128i vacc1x0123 = vacc0x0123;
    __m128i vacc2x0123 = vacc0x0123;
    __m128i vacc3x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);

    // Main K loop: 8 bytes of A per row and 32 weight bytes (four 8-byte
    // groups, each loaded and widened separately) per iteration.
    size_t k = kc;
    while (k >= 8 * sizeof(int8_t)) {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
      a0 += 8;
      const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
      const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
      a1 += 8;
      const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
      const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
      a2 += 8;
      const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
      const __m128i vxa3 = _mm_cvtepi8_epi16(va3);
      a3 += 8;

      const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
      const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);

      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      vacc1x0123 = _mm_add_epi32(vacc1x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      vacc2x0123 = _mm_add_epi32(vacc2x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      vacc3x0123 = _mm_add_epi32(vacc3x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
      const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);

      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
      vacc1x0123 = _mm_add_epi32(vacc1x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
      vacc2x0123 = _mm_add_epi32(vacc2x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
      vacc3x0123 = _mm_add_epi32(vacc3x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
      const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
      const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);

      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
      vacc1x0123 = _mm_add_epi32(vacc1x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
      vacc2x0123 = _mm_add_epi32(vacc2x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
      vacc3x0123 = _mm_add_epi32(vacc3x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
      const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
      const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);

      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
      vacc1x0123 = _mm_add_epi32(vacc1x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
      vacc2x0123 = _mm_add_epi32(vacc2x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
      vacc3x0123 = _mm_add_epi32(vacc3x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));

      w = (const void*) ((const int8_t*) w + 32);
      k -= 8 * sizeof(int8_t);
    }
    // K remainder (kc rounded to 2, so k here is 2, 4 or 6): process up to
    // three more K-pairs; the 8-byte A loads may over-read (XNN_OOB_READS).
    if (k != 0) {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
      a0 = (const int8_t*) ((uintptr_t) a0 + k);
      const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
      const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
      a1 = (const int8_t*) ((uintptr_t) a1 + k);
      const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
      const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
      a2 = (const int8_t*) ((uintptr_t) a2 + k);
      const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
      const __m128i vxa3 = _mm_cvtepi8_epi16(va3);
      a3 = (const int8_t*) ((uintptr_t) a3 + k);

      const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
      const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
      w = (const void*) ((const int8_t*) w + 8);

      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      vacc1x0123 = _mm_add_epi32(vacc1x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      vacc2x0123 = _mm_add_epi32(vacc2x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      vacc3x0123 = _mm_add_epi32(vacc3x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));

      if (k > 2 * sizeof(int8_t)) {
        const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
        const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
        w = (const void*) ((const int8_t*) w + 8);

        vacc0x0123 = _mm_add_epi32(vacc0x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
        vacc1x0123 = _mm_add_epi32(vacc1x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
        vacc2x0123 = _mm_add_epi32(vacc2x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
        vacc3x0123 = _mm_add_epi32(vacc3x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));

        if (k > 4 * sizeof(int8_t)) {
          const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
          const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
          w = (const void*) ((const int8_t*) w + 8);

          vacc0x0123 = _mm_add_epi32(vacc0x0123,
            _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
          vacc1x0123 = _mm_add_epi32(vacc1x0123,
            _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
          vacc2x0123 = _mm_add_epi32(vacc2x0123,
            _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
          vacc3x0123 = _mm_add_epi32(vacc3x0123,
            _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
        }
      }
    }

    // fp32 requantization: scale in float, clamp the max in float (the min is
    // enforced after packing), round-convert back to int32.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
    __m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123);

    const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
    vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale);

    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
    vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point);

    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
    vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123);

    // Pack to int16 with a saturating zero-point add, then to int8, then
    // apply the output minimum.
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point);

    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123);

    vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));

    if (nc >= 4) {
      // Full tile: each 32-bit lane of vout holds one row's 4 output bytes.
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
      unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
      unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3));

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);

      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
      a3 = (const int8_t*) ((uintptr_t) a3 - kc);

      nc -= 4;
    } else {
      // Partial tile (nc in 1..3): store 2 bytes per row, shift the register
      // right by 16 bits, then optionally store 1 more byte per row.
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6));
        c3 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c0 = (int8_t) _mm_extract_epi8(vout, 0);
        *c1 = (int8_t) _mm_extract_epi8(vout, 4);
        *c2 = (int8_t) _mm_extract_epi8(vout, 8);
        *c3 = (int8_t) _mm_extract_epi8(vout, 12);
      }

      nc = 0;
    }
  } while (nc != 0);
}
| 10,877
| 40.519084
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x4c2-minmax-fp32-sse2-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_4x4c2__sse2_ld128(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
__m128i vacc2x0123 = vacc0x0123;
__m128i vacc3x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_srai_epi16(_mm_unpacklo_epi8(va1, va1), 8);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_srai_epi16(_mm_unpacklo_epi8(va2, va2), 8);
a2 += 8;
const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
const __m128i vxa3 = _mm_srai_epi16(_mm_unpacklo_epi8(va3, va3), 8);
a3 += 8;
const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
const __m128i vsb01 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb01);
const __m128i vxb0 = _mm_unpacklo_epi8(vb01, vsb01);
const __m128i vxb1 = _mm_unpackhi_epi8(vb01, vsb01);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vsb23 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb23);
const __m128i vxb2 = _mm_unpacklo_epi8(vb23, vsb23);
const __m128i vxb3 = _mm_unpackhi_epi8(vb23, vsb23);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
a0 = (const int8_t*) ((uintptr_t) a0 + k);
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_srai_epi16(_mm_unpacklo_epi8(va1, va1), 8);
a1 = (const int8_t*) ((uintptr_t) a1 + k);
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_srai_epi16(_mm_unpacklo_epi8(va2, va2), 8);
a2 = (const int8_t*) ((uintptr_t) a2 + k);
const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
const __m128i vxa3 = _mm_srai_epi16(_mm_unpacklo_epi8(va3, va3), 8);
a3 = (const int8_t*) ((uintptr_t) a3 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_srai_epi16(_mm_unpacklo_epi8(vb0, vb0), 8);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
if (k > 2 * sizeof(int8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb1 = _mm_srai_epi16(_mm_unpacklo_epi8(vb1, vb1), 8);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
if (k > 4 * sizeof(int8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb2 = _mm_srai_epi16(_mm_unpacklo_epi8(vb2, vb2), 8);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
}
}
}
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
__m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
vacc01x0123 = _mm_max_epi16(vacc01x0123, voutput_min);
vacc23x0123 = _mm_max_epi16(vacc23x0123, voutput_min);
__m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123);
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
vout = _mm_shuffle_epi32(vout, _MM_SHUFFLE(0, 3, 2, 1));
unaligned_store_u32(c1, (uint32_t) _mm_cvtsi128_si32(vout));
vout = _mm_shuffle_epi32(vout, _MM_SHUFFLE(0, 3, 2, 1));
unaligned_store_u32(c2, (uint32_t) _mm_cvtsi128_si32(vout));
vout = _mm_shuffle_epi32(vout, _MM_SHUFFLE(0, 3, 2, 1));
unaligned_store_u32(c3, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
a2 = (const int8_t*) ((uintptr_t) a2 - kc);
a3 = (const int8_t*) ((uintptr_t) a3 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
c2 += 2;
unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6));
c3 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_cvtsi128_si32(vout);
*c1 = (int8_t) _mm_extract_epi16(vout, 2);
*c2 = (int8_t) _mm_extract_epi16(vout, 4);
*c3 = (int8_t) _mm_extract_epi16(vout, 6);
}
nc = 0;
}
} while (nc != 0);
}
| 11,447
| 41.876404
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x4c2-minmax-fp32-sse2-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QS8 GEMM microkernel, 4x4 tile, SSE2, 64-bit ("ld64") weight loads.
// Computes up to 4 rows x 4 columns of C = A*W with int8 inputs, int32
// accumulation, fp32 requantization, and saturating int8 output.
//
// Packed weight layout (per 4-column group): 4 int32 initial accumulators,
// followed by int8 weight bytes consumed 8 at a time per K-step.
void xnn_qs8_gemm_minmax_fp32_ukernel_4x4c2__sse2_ld64(
    size_t mr,                       // number of A/C rows to process (1..4)
    size_t nc,                       // number of C columns remaining
    size_t kc,                       // reduction (K) dimension, in bytes
    const int8_t* restrict a,        // input matrix A
    size_t a_stride,                 // byte stride between rows of A
    const void* restrict w,          // packed weights (accumulators + int8 weights)
    int8_t* restrict c,              // output matrix C
    size_t cm_stride,                // byte stride between rows of C
    size_t cn_stride,                // byte stride between 4-column tiles of C
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // K is consumed in pairs of int8 elements ("c2" layout), so round up to 2.
  kc = round_up_po2(kc, 2 * sizeof(int8_t));
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    // Fewer rows than the tile height: alias the extra row pointers onto the
    // previous row so the unconditional 4-row code path writes duplicates
    // instead of out-of-bounds memory.
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }
  do {
    // Seed all four row accumulators with the packed per-column int32 values.
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    __m128i vacc1x0123 = vacc0x0123;
    __m128i vacc2x0123 = vacc0x0123;
    __m128i vacc3x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);
    size_t k = kc;
    // Main loop: 8 bytes of K per iteration.
    while (k >= 8 * sizeof(int8_t)) {
      // Load 8 int8 activations per row and sign-extend to int16 via
      // unpack-with-self + arithmetic right shift (SSE2 lacks cvtepi8_epi16).
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
      a0 += 8;
      const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
      const __m128i vxa1 = _mm_srai_epi16(_mm_unpacklo_epi8(va1, va1), 8);
      a1 += 8;
      const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
      const __m128i vxa2 = _mm_srai_epi16(_mm_unpacklo_epi8(va2, va2), 8);
      a2 += 8;
      const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
      const __m128i vxa3 = _mm_srai_epi16(_mm_unpacklo_epi8(va3, va3), 8);
      a3 += 8;
      // K elements 0-1: broadcast each row's first int16 pair and
      // multiply-accumulate against weights for all 4 output columns.
      const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
      const __m128i vxb0 = _mm_srai_epi16(_mm_unpacklo_epi8(vb0, vb0), 8);
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      vacc1x0123 = _mm_add_epi32(vacc1x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      vacc2x0123 = _mm_add_epi32(vacc2x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      vacc3x0123 = _mm_add_epi32(vacc3x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      // K elements 2-3.
      const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
      const __m128i vxb1 = _mm_srai_epi16(_mm_unpacklo_epi8(vb1, vb1), 8);
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
      vacc1x0123 = _mm_add_epi32(vacc1x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
      vacc2x0123 = _mm_add_epi32(vacc2x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
      vacc3x0123 = _mm_add_epi32(vacc3x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
      // K elements 4-5.
      const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
      const __m128i vxb2 = _mm_srai_epi16(_mm_unpacklo_epi8(vb2, vb2), 8);
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
      vacc1x0123 = _mm_add_epi32(vacc1x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
      vacc2x0123 = _mm_add_epi32(vacc2x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
      vacc3x0123 = _mm_add_epi32(vacc3x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
      // K elements 6-7.
      const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
      const __m128i vxb3 = _mm_srai_epi16(_mm_unpacklo_epi8(vb3, vb3), 8);
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
      vacc1x0123 = _mm_add_epi32(vacc1x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
      vacc2x0123 = _mm_add_epi32(vacc2x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
      vacc3x0123 = _mm_add_epi32(vacc3x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
      w = (const void*) ((const int8_t*) w + 32);
      k -= 8 * sizeof(int8_t);
    }
    // Remainder: 2, 4, or 6 K bytes left (kc was rounded up to a multiple of 2).
    if (k != 0) {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
      a0 = (const int8_t*) ((uintptr_t) a0 + k);
      const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
      const __m128i vxa1 = _mm_srai_epi16(_mm_unpacklo_epi8(va1, va1), 8);
      a1 = (const int8_t*) ((uintptr_t) a1 + k);
      const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
      const __m128i vxa2 = _mm_srai_epi16(_mm_unpacklo_epi8(va2, va2), 8);
      a2 = (const int8_t*) ((uintptr_t) a2 + k);
      const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
      const __m128i vxa3 = _mm_srai_epi16(_mm_unpacklo_epi8(va3, va3), 8);
      a3 = (const int8_t*) ((uintptr_t) a3 + k);
      const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
      const __m128i vxb0 = _mm_srai_epi16(_mm_unpacklo_epi8(vb0, vb0), 8);
      w = (const void*) ((const int8_t*) w + 8);
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      vacc1x0123 = _mm_add_epi32(vacc1x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      vacc2x0123 = _mm_add_epi32(vacc2x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      vacc3x0123 = _mm_add_epi32(vacc3x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      if (k > 2 * sizeof(int8_t)) {
        const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
        const __m128i vxb1 = _mm_srai_epi16(_mm_unpacklo_epi8(vb1, vb1), 8);
        w = (const void*) ((const int8_t*) w + 8);
        vacc0x0123 = _mm_add_epi32(vacc0x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
        vacc1x0123 = _mm_add_epi32(vacc1x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
        vacc2x0123 = _mm_add_epi32(vacc2x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
        vacc3x0123 = _mm_add_epi32(vacc3x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
        if (k > 4 * sizeof(int8_t)) {
          const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
          const __m128i vxb2 = _mm_srai_epi16(_mm_unpacklo_epi8(vb2, vb2), 8);
          w = (const void*) ((const int8_t*) w + 8);
          vacc0x0123 = _mm_add_epi32(vacc0x0123,
            _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
          vacc1x0123 = _mm_add_epi32(vacc1x0123,
            _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
          vacc2x0123 = _mm_add_epi32(vacc2x0123,
            _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
          vacc3x0123 = _mm_add_epi32(vacc3x0123,
            _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
        }
      }
    }
    // Requantization: int32 accumulators -> float, scale, clamp to the
    // output max (expressed relative to the zero point), round back to int32.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
    __m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123);
    const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
    vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale);
    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
    vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point);
    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
    vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123);
    // Pack int32 -> int16 with saturation, add the output zero point,
    // apply the output min on int16, then pack to int8 with saturation.
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point);
    const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
    vacc01x0123 = _mm_max_epi16(vacc01x0123, voutput_min);
    vacc23x0123 = _mm_max_epi16(vacc23x0123, voutput_min);
    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123);
    if (nc >= 4) {
      // Full tile: store 4 bytes per row, rotating vout to bring each row's
      // 32-bit lane to position 0; then rewind A pointers for the next tile.
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      vout = _mm_shuffle_epi32(vout, _MM_SHUFFLE(0, 3, 2, 1));
      unaligned_store_u32(c1, (uint32_t) _mm_cvtsi128_si32(vout));
      vout = _mm_shuffle_epi32(vout, _MM_SHUFFLE(0, 3, 2, 1));
      unaligned_store_u32(c2, (uint32_t) _mm_cvtsi128_si32(vout));
      vout = _mm_shuffle_epi32(vout, _MM_SHUFFLE(0, 3, 2, 1));
      unaligned_store_u32(c3, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
      a3 = (const int8_t*) ((uintptr_t) a3 - kc);
      nc -= 4;
    } else {
      // Partial tile: store 2 bytes and/or 1 byte per row as nc requires.
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6));
        c3 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c0 = (int8_t) _mm_cvtsi128_si32(vout);
        *c1 = (int8_t) _mm_extract_epi16(vout, 2);
        *c2 = (int8_t) _mm_extract_epi16(vout, 4);
        *c3 = (int8_t) _mm_extract_epi16(vout, 6);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 11,537
| 42.213483
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x4c2-minmax-fp32-sse41-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QS8 GEMM microkernel, 4x4 tile, SSE4.1, 128-bit ("ld128") weight loads.
// Computes up to 4 rows x 4 columns of C = A*W with int8 inputs, int32
// accumulation, fp32 requantization, and saturating int8 output.
//
// Compared with the SSE2 variant: sign-extension uses _mm_cvtepi8_epi16,
// the main loop loads two weight groups per 128-bit load, and the output
// min clamp is applied directly on packed int8 with _mm_max_epi8.
void xnn_qs8_gemm_minmax_fp32_ukernel_4x4c2__sse41_ld128(
    size_t mr,                       // number of A/C rows to process (1..4)
    size_t nc,                       // number of C columns remaining
    size_t kc,                       // reduction (K) dimension, in bytes
    const int8_t* restrict a,        // input matrix A
    size_t a_stride,                 // byte stride between rows of A
    const void* restrict w,          // packed weights (accumulators + int8 weights)
    int8_t* restrict c,              // output matrix C
    size_t cm_stride,                // byte stride between rows of C
    size_t cn_stride,                // byte stride between 4-column tiles of C
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // K is consumed in pairs of int8 elements ("c2" layout), so round up to 2.
  kc = round_up_po2(kc, 2 * sizeof(int8_t));
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    // Fewer rows than the tile height: alias the extra row pointers onto the
    // previous row so the unconditional 4-row code path stays in bounds.
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }
  do {
    // Seed all four row accumulators with the packed per-column int32 values.
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    __m128i vacc1x0123 = vacc0x0123;
    __m128i vacc2x0123 = vacc0x0123;
    __m128i vacc3x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);
    size_t k = kc;
    // Main loop: 8 bytes of K per iteration.
    while (k >= 8 * sizeof(int8_t)) {
      // Load 8 int8 activations per row and sign-extend to int16.
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
      a0 += 8;
      const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
      const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
      a1 += 8;
      const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
      const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
      a2 += 8;
      const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
      const __m128i vxa3 = _mm_cvtepi8_epi16(va3);
      a3 += 8;
      // One 128-bit load covers weight groups 0 and 1: low half sign-extended
      // with cvtepi8, high half via unpackhi-with-self + arithmetic shift.
      const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
      const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
      const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);
      // K elements 0-1: broadcast each row's first int16 pair and
      // multiply-accumulate against weights for all 4 output columns.
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      vacc1x0123 = _mm_add_epi32(vacc1x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      vacc2x0123 = _mm_add_epi32(vacc2x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      vacc3x0123 = _mm_add_epi32(vacc3x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      // K elements 2-3.
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
      vacc1x0123 = _mm_add_epi32(vacc1x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
      vacc2x0123 = _mm_add_epi32(vacc2x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
      vacc3x0123 = _mm_add_epi32(vacc3x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
      // Second 128-bit load covers weight groups 2 and 3.
      const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16));
      const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
      const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);
      // K elements 4-5.
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
      vacc1x0123 = _mm_add_epi32(vacc1x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
      vacc2x0123 = _mm_add_epi32(vacc2x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
      vacc3x0123 = _mm_add_epi32(vacc3x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
      // K elements 6-7.
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
      vacc1x0123 = _mm_add_epi32(vacc1x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
      vacc2x0123 = _mm_add_epi32(vacc2x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
      vacc3x0123 = _mm_add_epi32(vacc3x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
      w = (const void*) ((const int8_t*) w + 32);
      k -= 8 * sizeof(int8_t);
    }
    // Remainder: 2, 4, or 6 K bytes left (kc was rounded up to a multiple of 2).
    if (k != 0) {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
      a0 = (const int8_t*) ((uintptr_t) a0 + k);
      const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
      const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
      a1 = (const int8_t*) ((uintptr_t) a1 + k);
      const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
      const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
      a2 = (const int8_t*) ((uintptr_t) a2 + k);
      const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
      const __m128i vxa3 = _mm_cvtepi8_epi16(va3);
      a3 = (const int8_t*) ((uintptr_t) a3 + k);
      const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
      const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
      w = (const void*) ((const int8_t*) w + 8);
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      vacc1x0123 = _mm_add_epi32(vacc1x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      vacc2x0123 = _mm_add_epi32(vacc2x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      vacc3x0123 = _mm_add_epi32(vacc3x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      if (k > 2 * sizeof(int8_t)) {
        const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
        const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
        w = (const void*) ((const int8_t*) w + 8);
        vacc0x0123 = _mm_add_epi32(vacc0x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
        vacc1x0123 = _mm_add_epi32(vacc1x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
        vacc2x0123 = _mm_add_epi32(vacc2x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
        vacc3x0123 = _mm_add_epi32(vacc3x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
        if (k > 4 * sizeof(int8_t)) {
          const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
          const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
          w = (const void*) ((const int8_t*) w + 8);
          vacc0x0123 = _mm_add_epi32(vacc0x0123,
            _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
          vacc1x0123 = _mm_add_epi32(vacc1x0123,
            _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
          vacc2x0123 = _mm_add_epi32(vacc2x0123,
            _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
          vacc3x0123 = _mm_add_epi32(vacc3x0123,
            _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
        }
      }
    }
    // Requantization: int32 accumulators -> float, scale, clamp to the
    // output max (expressed relative to the zero point), round back to int32.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
    __m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123);
    const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
    vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale);
    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
    vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point);
    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
    vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123);
    // Pack int32 -> int16 with saturation, add the output zero point, pack
    // to int8 with saturation, then clamp to the output min on int8 lanes.
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point);
    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123);
    vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
    if (nc >= 4) {
      // Full tile: store one 32-bit lane (4 int8 results) per row, then
      // rewind A pointers for the next column tile.
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
      unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
      unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3));
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
      a3 = (const int8_t*) ((uintptr_t) a3 - kc);
      nc -= 4;
    } else {
      // Partial tile: store 2 bytes and/or 1 byte per row as nc requires.
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6));
        c3 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c0 = (int8_t) _mm_extract_epi8(vout, 0);
        *c1 = (int8_t) _mm_extract_epi8(vout, 4);
        *c2 = (int8_t) _mm_extract_epi8(vout, 8);
        *c3 = (int8_t) _mm_extract_epi8(vout, 12);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 10,765
| 40.407692
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x4c2-minmax-fp32-sse41-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_4x4c2__sse41_ld64(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
__m128i vacc2x0123 = vacc0x0123;
__m128i vacc3x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
a2 += 8;
const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
const __m128i vxa3 = _mm_cvtepi8_epi16(va3);
a3 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 = (const int8_t*) ((uintptr_t) a0 + k);
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
a1 = (const int8_t*) ((uintptr_t) a1 + k);
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
a2 = (const int8_t*) ((uintptr_t) a2 + k);
const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
const __m128i vxa3 = _mm_cvtepi8_epi16(va3);
a3 = (const int8_t*) ((uintptr_t) a3 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
if (k > 2 * sizeof(int8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
if (k > 4 * sizeof(int8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
}
}
}
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
__m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
a2 = (const int8_t*) ((uintptr_t) a2 - kc);
a3 = (const int8_t*) ((uintptr_t) a3 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
c2 += 2;
unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6));
c3 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
*c1 = (int8_t) _mm_extract_epi8(vout, 4);
*c2 = (int8_t) _mm_extract_epi8(vout, 8);
*c3 = (int8_t) _mm_extract_epi8(vout, 12);
}
nc = 0;
}
} while (nc != 0);
}
| 10,879
| 40.526718
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x4c2-minmax-fp32-wasmsimd-dot16x2-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2-wasmsimd-dot16x2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
// QS8 GEMM microkernel, MR=4 rows x NR=4 columns, with "c2" weight packing
// (pairs of int8 along K interleaved per output channel).
// WAsm SIMD variant: products are accumulated with wasm_i32x4_dot_i16x8 on
// sign-extended 16-bit lanes; "ld128" means packed weights are fetched
// 128 bits (16 int8) at a time in the main loop.
// Requantization is the fp32 "magic bias" scheme: scale in float, add a
// magic bias so the integer part can be recovered by integer subtraction.
void xnn_qs8_gemm_minmax_fp32_ukernel_4x4c2__wasmsimd_dot16x2_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // The c2 packing processes K in pairs, so round kc up to a multiple of 2.
  kc = round_up_po2(kc, 2 * sizeof(int8_t));
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  // For mr < 4, alias the unused row pointers onto the previous row so the
  // kernel computes (and later stores) redundant rows instead of branching.
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }
  do {
    // Packed weights start with 4 int32 biases; every row starts from them.
    v128_t vacc0x0123 = wasm_v128_load(w);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc2x0123 = vacc0x0123;
    v128_t vacc3x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);
    size_t k = kc;
    // Main loop: consume 8 K elements per iteration (4 c2 pairs).
    while (k >= 8 * sizeof(int8_t)) {
      // Load 8 int8 activations per row, sign-extended to 8x int16.
      const v128_t vxa0 = wasm_i16x8_load8x8((const v128_t*) a0);
      a0 += 8;
      const v128_t vxa1 = wasm_i16x8_load8x8((const v128_t*) a1);
      a1 += 8;
      const v128_t vxa2 = wasm_i16x8_load8x8((const v128_t*) a2);
      a2 += 8;
      const v128_t vxa3 = wasm_i16x8_load8x8((const v128_t*) a3);
      a3 += 8;
      // ld128: one 16-byte load yields two 8x int16 weight vectors.
      const v128_t vb01 = wasm_v128_load(w);
      const v128_t vxb0 = wasm_i16x8_extend_low_i8x16(vb01);
      const v128_t vxb1 = wasm_i16x8_extend_high_i8x16(vb01);
      // Broadcast K-pair 0 of each activation row and dot with vxb0.
      vacc0x0123 = wasm_i32x4_add(vacc0x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0));
      vacc1x0123 = wasm_i32x4_add(vacc1x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 0, 0, 0, 0), vxb0));
      vacc2x0123 = wasm_i32x4_add(vacc2x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 0, 0, 0, 0), vxb0));
      vacc3x0123 = wasm_i32x4_add(vacc3x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 0, 0, 0, 0), vxb0));
      // K-pair 1 with vxb1.
      vacc0x0123 = wasm_i32x4_add(vacc0x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1));
      vacc1x0123 = wasm_i32x4_add(vacc1x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 1, 1, 1, 1), vxb1));
      vacc2x0123 = wasm_i32x4_add(vacc2x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 1, 1, 1, 1), vxb1));
      vacc3x0123 = wasm_i32x4_add(vacc3x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 1, 1, 1, 1), vxb1));
      // Second 16-byte weight load covers K-pairs 2 and 3.
      const v128_t vb23 = wasm_v128_load((const int8_t*) w + 16);
      const v128_t vxb2 = wasm_i16x8_extend_low_i8x16(vb23);
      const v128_t vxb3 = wasm_i16x8_extend_high_i8x16(vb23);
      vacc0x0123 = wasm_i32x4_add(vacc0x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2));
      vacc1x0123 = wasm_i32x4_add(vacc1x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 2, 2, 2, 2), vxb2));
      vacc2x0123 = wasm_i32x4_add(vacc2x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 2, 2, 2, 2), vxb2));
      vacc3x0123 = wasm_i32x4_add(vacc3x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 2, 2, 2, 2), vxb2));
      vacc0x0123 = wasm_i32x4_add(vacc0x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 3, 3, 3, 3), vxb3));
      vacc1x0123 = wasm_i32x4_add(vacc1x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 3, 3, 3, 3), vxb3));
      vacc2x0123 = wasm_i32x4_add(vacc2x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 3, 3, 3, 3), vxb3));
      vacc3x0123 = wasm_i32x4_add(vacc3x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 3, 3, 3, 3), vxb3));
      w = (const void*) ((const int8_t*) w + 32);
      k -= 8 * sizeof(int8_t);
    }
    // Remainder: 2, 4, or 6 K elements left (kc is a multiple of 2).
    // May read past the K end of each row; tolerated via XNN_OOB_READS.
    if (k != 0) {
      const v128_t vxa0 = wasm_i16x8_load8x8(a0);
      a0 = (const int8_t*) ((uintptr_t) a0 + k);
      const v128_t vxa1 = wasm_i16x8_load8x8(a1);
      a1 = (const int8_t*) ((uintptr_t) a1 + k);
      const v128_t vxa2 = wasm_i16x8_load8x8(a2);
      a2 = (const int8_t*) ((uintptr_t) a2 + k);
      const v128_t vxa3 = wasm_i16x8_load8x8(a3);
      a3 = (const int8_t*) ((uintptr_t) a3 + k);
      const v128_t vxb0 = wasm_i16x8_load8x8(w);
      w = (const void*) ((const int8_t*) w + 8);
      vacc0x0123 = wasm_i32x4_add(vacc0x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0));
      vacc1x0123 = wasm_i32x4_add(vacc1x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 0, 0, 0, 0), vxb0));
      vacc2x0123 = wasm_i32x4_add(vacc2x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 0, 0, 0, 0), vxb0));
      vacc3x0123 = wasm_i32x4_add(vacc3x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 0, 0, 0, 0), vxb0));
      if (k > 2 * sizeof(int8_t)) {
        const v128_t vxb1 = wasm_i16x8_load8x8(w);
        w = (const void*) ((const int8_t*) w + 8);
        vacc0x0123 = wasm_i32x4_add(vacc0x0123,
          wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1));
        vacc1x0123 = wasm_i32x4_add(vacc1x0123,
          wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 1, 1, 1, 1), vxb1));
        vacc2x0123 = wasm_i32x4_add(vacc2x0123,
          wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 1, 1, 1, 1), vxb1));
        vacc3x0123 = wasm_i32x4_add(vacc3x0123,
          wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 1, 1, 1, 1), vxb1));
        if (k > 4 * sizeof(int8_t)) {
          const v128_t vxb2 = wasm_i16x8_load8x8(w);
          w = (const void*) ((const int8_t*) w + 8);
          vacc0x0123 = wasm_i32x4_add(vacc0x0123,
            wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2));
          vacc1x0123 = wasm_i32x4_add(vacc1x0123,
            wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 2, 2, 2, 2), vxb2));
          vacc2x0123 = wasm_i32x4_add(vacc2x0123,
            wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 2, 2, 2, 2), vxb2));
          vacc3x0123 = wasm_i32x4_add(vacc3x0123,
            wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 2, 2, 2, 2), vxb2));
        }
      }
    }
    // Requantization: int32 accumulators -> float, scale, then magic-bias
    // rounding: adding the bias fixes the float's exponent so the rounded
    // integer sits in the low mantissa bits.
    vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123);
    vacc1x0123 = wasm_f32x4_convert_i32x4(vacc1x0123);
    vacc2x0123 = wasm_f32x4_convert_i32x4(vacc2x0123);
    vacc3x0123 = wasm_f32x4_convert_i32x4(vacc3x0123);
    const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
    vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale);
    vacc1x0123 = wasm_f32x4_mul(vacc1x0123, vscale);
    vacc2x0123 = wasm_f32x4_mul(vacc2x0123, vscale);
    vacc3x0123 = wasm_f32x4_mul(vacc3x0123, vscale);
    const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
    vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias);
    vacc1x0123 = wasm_f32x4_add(vacc1x0123, vmagic_bias);
    vacc2x0123 = wasm_f32x4_add(vacc2x0123, vmagic_bias);
    vacc3x0123 = wasm_f32x4_add(vacc3x0123, vmagic_bias);
    // Integer max against magic_min implements the lower output clamp.
    const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
    vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min);
    vacc1x0123 = wasm_i32x4_max(vacc1x0123, vmagic_min);
    vacc2x0123 = wasm_i32x4_max(vacc2x0123, vmagic_min);
    vacc3x0123 = wasm_i32x4_max(vacc3x0123, vmagic_min);
    // Subtracting (magic_bias - zero_point) recovers the biased int32 result.
    const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
    vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point);
    vacc1x0123 = wasm_i32x4_sub(vacc1x0123, vmagic_bias_less_output_zero_point);
    vacc2x0123 = wasm_i32x4_sub(vacc2x0123, vmagic_bias_less_output_zero_point);
    vacc3x0123 = wasm_i32x4_sub(vacc3x0123, vmagic_bias_less_output_zero_point);
    // Saturating narrow int32 -> int16 -> int8; rows packed as lanes
    // [row0 | row1 | row2 | row3] of the 16-byte output vector.
    v128_t vacc01x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc1x0123);
    v128_t vacc23x0123 = wasm_i16x8_narrow_i32x4(vacc2x0123, vacc3x0123);
    v128_t vout = wasm_i8x16_narrow_i16x8(vacc01x0123, vacc23x0123);
    const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
    vout = wasm_i8x16_min(vout, voutput_max);
    if (nc >= 4) {
      // Full 4-column tile: store one 32-bit lane per row, then rewind the
      // activation pointers by kc for the next tile of columns.
      wasm_v128_store32_lane(c0, vout, 0);
      wasm_v128_store32_lane(c1, vout, 1);
      wasm_v128_store32_lane(c2, vout, 2);
      wasm_v128_store32_lane(c3, vout, 3);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
      a3 = (const int8_t*) ((uintptr_t) a3 - kc);
      nc -= 4;
    } else {
      // Partial tile (1-3 columns): store 2 bytes, shift, then 1 byte.
      if (nc & 2) {
        wasm_v128_store16_lane(c0, vout, 0);
        c0 += 2;
        wasm_v128_store16_lane(c1, vout, 2);
        c1 += 2;
        wasm_v128_store16_lane(c2, vout, 4);
        c2 += 2;
        wasm_v128_store16_lane(c3, vout, 6);
        c3 += 2;
        vout = wasm_u32x4_shr(vout, 16);
      }
      if (nc & 1) {
        wasm_v128_store8_lane(c0, vout, 0);
        wasm_v128_store8_lane(c1, vout, 4);
        wasm_v128_store8_lane(c2, vout, 8);
        wasm_v128_store8_lane(c3, vout, 12);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 10,195
| 38.984314
| 134
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x4c2-minmax-fp32-wasmsimd-dot16x2-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2-wasmsimd-dot16x2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
// QS8 GEMM microkernel, MR=4 x NR=4, c2 weight packing, WAsm SIMD.
// Same math as the ld128 variant, but "ld64": each 8-byte group of packed
// weights is loaded and sign-extended separately with wasm_i16x8_load8x8
// instead of one 128-bit load split into low/high halves.
void xnn_qs8_gemm_minmax_fp32_ukernel_4x4c2__wasmsimd_dot16x2_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // c2 packing consumes K in pairs, so round kc up to a multiple of 2.
  kc = round_up_po2(kc, 2 * sizeof(int8_t));
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  // Alias unused rows onto the previous row when mr < 4 (branch-free MR).
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }
  do {
    // Packed weights begin with 4 int32 biases shared by all rows.
    v128_t vacc0x0123 = wasm_v128_load(w);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc2x0123 = vacc0x0123;
    v128_t vacc3x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);
    size_t k = kc;
    // Main loop: 8 K elements (4 c2 pairs) per iteration.
    while (k >= 8 * sizeof(int8_t)) {
      const v128_t vxa0 = wasm_i16x8_load8x8((const v128_t*) a0);
      a0 += 8;
      const v128_t vxa1 = wasm_i16x8_load8x8((const v128_t*) a1);
      a1 += 8;
      const v128_t vxa2 = wasm_i16x8_load8x8((const v128_t*) a2);
      a2 += 8;
      const v128_t vxa3 = wasm_i16x8_load8x8((const v128_t*) a3);
      a3 += 8;
      // ld64: load+sign-extend each 8-byte weight group independently.
      const v128_t vxb0 = wasm_i16x8_load8x8(w);
      vacc0x0123 = wasm_i32x4_add(vacc0x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0));
      vacc1x0123 = wasm_i32x4_add(vacc1x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 0, 0, 0, 0), vxb0));
      vacc2x0123 = wasm_i32x4_add(vacc2x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 0, 0, 0, 0), vxb0));
      vacc3x0123 = wasm_i32x4_add(vacc3x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 0, 0, 0, 0), vxb0));
      const v128_t vxb1 = wasm_i16x8_load8x8((const int8_t*) w + 8);
      vacc0x0123 = wasm_i32x4_add(vacc0x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1));
      vacc1x0123 = wasm_i32x4_add(vacc1x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 1, 1, 1, 1), vxb1));
      vacc2x0123 = wasm_i32x4_add(vacc2x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 1, 1, 1, 1), vxb1));
      vacc3x0123 = wasm_i32x4_add(vacc3x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 1, 1, 1, 1), vxb1));
      const v128_t vxb2 = wasm_i16x8_load8x8((const int8_t*) w + 16);
      vacc0x0123 = wasm_i32x4_add(vacc0x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2));
      vacc1x0123 = wasm_i32x4_add(vacc1x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 2, 2, 2, 2), vxb2));
      vacc2x0123 = wasm_i32x4_add(vacc2x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 2, 2, 2, 2), vxb2));
      vacc3x0123 = wasm_i32x4_add(vacc3x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 2, 2, 2, 2), vxb2));
      const v128_t vxb3 = wasm_i16x8_load8x8((const int8_t*) w + 24);
      vacc0x0123 = wasm_i32x4_add(vacc0x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 3, 3, 3, 3), vxb3));
      vacc1x0123 = wasm_i32x4_add(vacc1x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 3, 3, 3, 3), vxb3));
      vacc2x0123 = wasm_i32x4_add(vacc2x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 3, 3, 3, 3), vxb3));
      vacc3x0123 = wasm_i32x4_add(vacc3x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 3, 3, 3, 3), vxb3));
      w = (const void*) ((const int8_t*) w + 32);
      k -= 8 * sizeof(int8_t);
    }
    // Remainder of 2, 4, or 6 K elements; over-reads allowed (XNN_OOB_READS).
    if (k != 0) {
      const v128_t vxa0 = wasm_i16x8_load8x8(a0);
      a0 = (const int8_t*) ((uintptr_t) a0 + k);
      const v128_t vxa1 = wasm_i16x8_load8x8(a1);
      a1 = (const int8_t*) ((uintptr_t) a1 + k);
      const v128_t vxa2 = wasm_i16x8_load8x8(a2);
      a2 = (const int8_t*) ((uintptr_t) a2 + k);
      const v128_t vxa3 = wasm_i16x8_load8x8(a3);
      a3 = (const int8_t*) ((uintptr_t) a3 + k);
      const v128_t vxb0 = wasm_i16x8_load8x8(w);
      w = (const void*) ((const int8_t*) w + 8);
      vacc0x0123 = wasm_i32x4_add(vacc0x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0));
      vacc1x0123 = wasm_i32x4_add(vacc1x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 0, 0, 0, 0), vxb0));
      vacc2x0123 = wasm_i32x4_add(vacc2x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 0, 0, 0, 0), vxb0));
      vacc3x0123 = wasm_i32x4_add(vacc3x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 0, 0, 0, 0), vxb0));
      if (k > 2 * sizeof(int8_t)) {
        const v128_t vxb1 = wasm_i16x8_load8x8(w);
        w = (const void*) ((const int8_t*) w + 8);
        vacc0x0123 = wasm_i32x4_add(vacc0x0123,
          wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1));
        vacc1x0123 = wasm_i32x4_add(vacc1x0123,
          wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 1, 1, 1, 1), vxb1));
        vacc2x0123 = wasm_i32x4_add(vacc2x0123,
          wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 1, 1, 1, 1), vxb1));
        vacc3x0123 = wasm_i32x4_add(vacc3x0123,
          wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 1, 1, 1, 1), vxb1));
        if (k > 4 * sizeof(int8_t)) {
          const v128_t vxb2 = wasm_i16x8_load8x8(w);
          w = (const void*) ((const int8_t*) w + 8);
          vacc0x0123 = wasm_i32x4_add(vacc0x0123,
            wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2));
          vacc1x0123 = wasm_i32x4_add(vacc1x0123,
            wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 2, 2, 2, 2), vxb2));
          vacc2x0123 = wasm_i32x4_add(vacc2x0123,
            wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 2, 2, 2, 2), vxb2));
          vacc3x0123 = wasm_i32x4_add(vacc3x0123,
            wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 2, 2, 2, 2), vxb2));
        }
      }
    }
    // fp32 requantization with the magic-bias rounding trick (see ld128
    // variant): float-convert, scale, add bias, clamp, un-bias.
    vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123);
    vacc1x0123 = wasm_f32x4_convert_i32x4(vacc1x0123);
    vacc2x0123 = wasm_f32x4_convert_i32x4(vacc2x0123);
    vacc3x0123 = wasm_f32x4_convert_i32x4(vacc3x0123);
    const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
    vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale);
    vacc1x0123 = wasm_f32x4_mul(vacc1x0123, vscale);
    vacc2x0123 = wasm_f32x4_mul(vacc2x0123, vscale);
    vacc3x0123 = wasm_f32x4_mul(vacc3x0123, vscale);
    const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
    vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias);
    vacc1x0123 = wasm_f32x4_add(vacc1x0123, vmagic_bias);
    vacc2x0123 = wasm_f32x4_add(vacc2x0123, vmagic_bias);
    vacc3x0123 = wasm_f32x4_add(vacc3x0123, vmagic_bias);
    // Lower output clamp, applied as an integer max on the biased value.
    const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
    vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min);
    vacc1x0123 = wasm_i32x4_max(vacc1x0123, vmagic_min);
    vacc2x0123 = wasm_i32x4_max(vacc2x0123, vmagic_min);
    vacc3x0123 = wasm_i32x4_max(vacc3x0123, vmagic_min);
    const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
    vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point);
    vacc1x0123 = wasm_i32x4_sub(vacc1x0123, vmagic_bias_less_output_zero_point);
    vacc2x0123 = wasm_i32x4_sub(vacc2x0123, vmagic_bias_less_output_zero_point);
    vacc3x0123 = wasm_i32x4_sub(vacc3x0123, vmagic_bias_less_output_zero_point);
    // Saturating narrow to int8; rows occupy consecutive 32-bit lanes.
    v128_t vacc01x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc1x0123);
    v128_t vacc23x0123 = wasm_i16x8_narrow_i32x4(vacc2x0123, vacc3x0123);
    v128_t vout = wasm_i8x16_narrow_i16x8(vacc01x0123, vacc23x0123);
    const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
    vout = wasm_i8x16_min(vout, voutput_max);
    if (nc >= 4) {
      // Full tile of 4 columns; rewind activations by kc for the next tile.
      wasm_v128_store32_lane(c0, vout, 0);
      wasm_v128_store32_lane(c1, vout, 1);
      wasm_v128_store32_lane(c2, vout, 2);
      wasm_v128_store32_lane(c3, vout, 3);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
      a3 = (const int8_t*) ((uintptr_t) a3 - kc);
      nc -= 4;
    } else {
      // Partial tile (1-3 columns): 2-byte store, lane shift, 1-byte store.
      if (nc & 2) {
        wasm_v128_store16_lane(c0, vout, 0);
        c0 += 2;
        wasm_v128_store16_lane(c1, vout, 2);
        c1 += 2;
        wasm_v128_store16_lane(c2, vout, 4);
        c2 += 2;
        wasm_v128_store16_lane(c3, vout, 6);
        c3 += 2;
        vout = wasm_u32x4_shr(vout, 16);
      }
      if (nc & 1) {
        wasm_v128_store8_lane(c0, vout, 0);
        wasm_v128_store8_lane(c1, vout, 4);
        wasm_v128_store8_lane(c2, vout, 8);
        wasm_v128_store8_lane(c3, vout, 12);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 10,095
| 38.905138
| 134
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x4c2-minmax-fp32-xop-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QS8 GEMM microkernel, MR=4 x NR=4, c2 weight packing, x86 XOP variant.
// Uses the XOP fused multiply-add _mm_maddd_epi16 (madd + accumulate in one
// instruction); "ld128" loads packed weights 128 bits at a time in the main
// loop. Requantization is the fp32 path with SSE4 params: float scale,
// min-clamp in float, cvtps round-to-nearest, then saturating packs.
void xnn_qs8_gemm_minmax_fp32_ukernel_4x4c2__xop_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // c2 packing consumes K in pairs, so round kc up to a multiple of 2.
  kc = round_up_po2(kc, 2 * sizeof(int8_t));
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  // Alias unused rows onto the previous row when mr < 4 (branch-free MR).
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }
  do {
    // Packed weights begin with 4 int32 biases shared by all rows.
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    __m128i vacc1x0123 = vacc0x0123;
    __m128i vacc2x0123 = vacc0x0123;
    __m128i vacc3x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);
    size_t k = kc;
    // Main loop: 8 K elements (4 c2 pairs) per iteration.
    while (k >= 8 * sizeof(int8_t)) {
      // Load 8 int8 activations per row and sign-extend to 8x int16.
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
      a0 += 8;
      const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
      const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
      a1 += 8;
      const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
      const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
      a2 += 8;
      const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
      const __m128i vxa3 = _mm_cvtepi8_epi16(va3);
      a3 += 8;
      // ld128: one 16-byte weight load; low half sign-extended with
      // cvtepi8_epi16, high half via unpackhi+arithmetic-shift (same result).
      const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
      const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
      const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);
      // XOP maddd: per-lane (a0*b0 + a1*b1) + accumulator in one op.
      vacc0x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc0x0123);
      vacc1x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc1x0123);
      vacc2x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc2x0123);
      vacc3x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc3x0123);
      vacc0x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc0x0123);
      vacc1x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc1x0123);
      vacc2x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc2x0123);
      vacc3x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc3x0123);
      // Second 16-byte weight load covers K-pairs 2 and 3.
      const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16));
      const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
      const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);
      vacc0x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc0x0123);
      vacc1x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc1x0123);
      vacc2x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc2x0123);
      vacc3x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc3x0123);
      vacc0x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc0x0123);
      vacc1x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc1x0123);
      vacc2x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc2x0123);
      vacc3x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc3x0123);
      w = (const void*) ((const int8_t*) w + 32);
      k -= 8 * sizeof(int8_t);
    }
    // Remainder of 2, 4, or 6 K elements; over-reads allowed (XNN_OOB_READS).
    if (k != 0) {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
      a0 = (const int8_t*) ((uintptr_t) a0 + k);
      const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
      const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
      a1 = (const int8_t*) ((uintptr_t) a1 + k);
      const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
      const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
      a2 = (const int8_t*) ((uintptr_t) a2 + k);
      const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
      const __m128i vxa3 = _mm_cvtepi8_epi16(va3);
      a3 = (const int8_t*) ((uintptr_t) a3 + k);
      const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
      const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
      w = (const void*) ((const int8_t*) w + 8);
      vacc0x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc0x0123);
      vacc1x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc1x0123);
      vacc2x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc2x0123);
      vacc3x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc3x0123);
      if (k > 2 * sizeof(int8_t)) {
        const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
        const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
        w = (const void*) ((const int8_t*) w + 8);
        vacc0x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc0x0123);
        vacc1x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc1x0123);
        vacc2x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc2x0123);
        vacc3x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc3x0123);
        if (k > 4 * sizeof(int8_t)) {
          const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
          const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
          w = (const void*) ((const int8_t*) w + 8);
          vacc0x0123 = _mm_maddd_epi16(
            _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc0x0123);
          vacc1x0123 = _mm_maddd_epi16(
            _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc1x0123);
          vacc2x0123 = _mm_maddd_epi16(
            _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc2x0123);
          vacc3x0123 = _mm_maddd_epi16(
            _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc3x0123);
        }
      }
    }
    // fp32 requantization: int32 -> float, multiply by scale.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
    __m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123);
    const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
    vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale);
    // Upper clamp in float space (max is expressed relative to zero point).
    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
    vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point);
    // Round-to-nearest-even back to int32.
    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
    vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123);
    // Add zero point with int16 saturation, then saturating pack to int8;
    // rows end up in consecutive 32-bit lanes of vout.
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point);
    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123);
    // Lower output clamp.
    vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
    if (nc >= 4) {
      // Full tile of 4 columns; rewind activations by kc for the next tile.
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
      unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
      unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3));
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
      a3 = (const int8_t*) ((uintptr_t) a3 - kc);
      nc -= 4;
    } else {
      // Partial tile (1-3 columns): 2-byte store, lane shift, 1-byte store.
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6));
        c3 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c0 = (int8_t) _mm_extract_epi8(vout, 0);
        *c1 = (int8_t) _mm_extract_epi8(vout, 4);
        *c2 = (int8_t) _mm_extract_epi8(vout, 8);
        *c3 = (int8_t) _mm_extract_epi8(vout, 12);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 10,452
| 38.594697
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x4c2-minmax-fp32-xop-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QS8 (signed 8-bit) GEMM microkernel producing an up-to-4x4 output tile:
// int8 A (mr x kc) times packed int8 weights, int32 accumulation, fp32
// requantization, saturated int8 store.
//   "4x4c2"  - 4 rows x 4 columns per tile, K interleaved in groups of 2 in
//              the packed weights.
//   "xop"    - uses the AMD XOP _mm_maddd_epi16 fused multiply-accumulate.
//   "ld64"   - weights are fetched with 64-bit (_mm_loadl_epi64) loads.
//
// Arguments:
//   mr        - number of valid rows in A/C (1..4)
//   nc        - number of output columns to produce
//   kc        - reduction (K) length in bytes
//   a/a_stride - input matrix A and its row stride in bytes
//   w         - packed weights: per 4-column group, 4 int32 biases followed
//               by int8 weight data in c2-interleaved order
//   c/cm_stride/cn_stride - output matrix, row stride, and column-group stride
//   params    - fp32_sse4 requantization constants (scale,
//               output_max_less_zero_point, output_zero_point, output_min)
void xnn_qs8_gemm_minmax_fp32_ukernel_4x4c2__xop_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // Round K up to a multiple of 2 bytes to match the c2 packed-weight layout.
  kc = round_up_po2(kc, 2 * sizeof(int8_t));
  // Per-row input/output pointers. Rows beyond mr alias the previous row, so
  // the kernel always computes 4 rows but the extra ones are redundant copies.
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }
  do {
    // Seed each row's accumulator with the 4 int32 column biases from w.
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    __m128i vacc1x0123 = vacc0x0123;
    __m128i vacc2x0123 = vacc0x0123;
    __m128i vacc3x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);
    size_t k = kc;
    // Main loop: consume K in chunks of 8 bytes (4 groups of 2).
    while (k >= 8 * sizeof(int8_t)) {
      // Load 8 int8 values per row and sign-extend to int16.
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
      a0 += 8;
      const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
      const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
      a1 += 8;
      const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
      const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
      a2 += 8;
      const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
      const __m128i vxa3 = _mm_cvtepi8_epi16(va3);
      a3 += 8;
      // Each 8-byte weight load covers 4 columns x 2 K values; the matching
      // K-pair of each row is broadcast with _mm_shuffle_epi32, and
      // _mm_maddd_epi16 multiplies int16 pairs and accumulates into int32.
      const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
      const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
      vacc0x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc0x0123);
      vacc1x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc1x0123);
      vacc2x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc2x0123);
      vacc3x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc3x0123);
      const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
      const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
      vacc0x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc0x0123);
      vacc1x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc1x0123);
      vacc2x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc2x0123);
      vacc3x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc3x0123);
      const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
      const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
      vacc0x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc0x0123);
      vacc1x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc1x0123);
      vacc2x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc2x0123);
      vacc3x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc3x0123);
      const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
      const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);
      vacc0x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc0x0123);
      vacc1x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc1x0123);
      vacc2x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc2x0123);
      vacc3x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc3x0123);
      w = (const void*) ((const int8_t*) w + 32);
      k -= 8 * sizeof(int8_t);
    }
    // Remainder: handle 2, 4, or 6 leftover K bytes (kc is a multiple of 2).
    // May read past the end of a* (kernel is declared XNN_OOB_READS).
    if (k != 0) {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
      a0 = (const int8_t*) ((uintptr_t) a0 + k);
      const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
      const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
      a1 = (const int8_t*) ((uintptr_t) a1 + k);
      const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
      const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
      a2 = (const int8_t*) ((uintptr_t) a2 + k);
      const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
      const __m128i vxa3 = _mm_cvtepi8_epi16(va3);
      a3 = (const int8_t*) ((uintptr_t) a3 + k);
      const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
      const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
      w = (const void*) ((const int8_t*) w + 8);
      vacc0x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc0x0123);
      vacc1x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc1x0123);
      vacc2x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc2x0123);
      vacc3x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc3x0123);
      if (k > 2 * sizeof(int8_t)) {
        const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
        const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
        w = (const void*) ((const int8_t*) w + 8);
        vacc0x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc0x0123);
        vacc1x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc1x0123);
        vacc2x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc2x0123);
        vacc3x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc3x0123);
        if (k > 4 * sizeof(int8_t)) {
          const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
          const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
          w = (const void*) ((const int8_t*) w + 8);
          vacc0x0123 = _mm_maddd_epi16(
            _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc0x0123);
          vacc1x0123 = _mm_maddd_epi16(
            _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc1x0123);
          vacc2x0123 = _mm_maddd_epi16(
            _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc2x0123);
          vacc3x0123 = _mm_maddd_epi16(
            _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc3x0123);
        }
      }
    }
    // fp32 requantization: convert int32 accumulators to float, scale, clamp
    // from above, round back to int32, add the output zero point with int16
    // saturation, pack to int8, and clamp from below.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
    __m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123);
    const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
    vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale);
    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
    vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point);
    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
    vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123);
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point);
    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123);
    vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
    if (nc >= 4) {
      // Full 4-column store: one 32-bit lane of vout per row; then rewind the
      // A pointers by kc to reuse the same rows for the next column group.
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
      unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
      unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3));
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
      a3 = (const int8_t*) ((uintptr_t) a3 - kc);
      nc -= 4;
    } else {
      // Partial store of the final 1-3 columns, 2 bytes then 1 byte per row.
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6));
        c3 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c0 = (int8_t) _mm_extract_epi8(vout, 0);
        *c1 = (int8_t) _mm_extract_epi8(vout, 4);
        *c2 = (int8_t) _mm_extract_epi8(vout, 8);
        *c3 = (int8_t) _mm_extract_epi8(vout, 12);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 10,566
| 38.725564
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x4c2-xw-minmax-fp32-avx.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QS8 GEMM microkernel (4x4 tile) for the "xw" weight layout: weights are
// stored pre-sign-extended as int16 (all w offsets below are in int16 units
// and loads use aligned _mm_load_si128), so no per-iteration weight widening
// is needed.  int8 A is widened with SSE4.1 _mm_cvtepi8_epi16, products are
// accumulated via _mm_madd_epi16 + _mm_add_epi32, and results are
// requantized with the fp32_sse4 path and stored as saturated int8.
//
// Arguments mirror the other 4x4c2 kernels: mr (1..4 rows), nc columns,
// kc reduction bytes, a/a_stride inputs, w packed (4 int32 biases per
// 4-column group, then int16 weights), c/cm_stride/cn_stride outputs,
// params fp32_sse4 requantization constants.
void xnn_qs8_gemm_xw_minmax_fp32_ukernel_4x4c2__avx(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // Round K up to a multiple of 2 bytes to match the c2 packed-weight layout.
  kc = round_up_po2(kc, 2 * sizeof(int8_t));
  // Per-row pointers; rows past mr alias the previous row (redundant compute).
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }
  do {
    // Seed each row's accumulator with the 4 int32 column biases from w.
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    __m128i vacc1x0123 = vacc0x0123;
    __m128i vacc2x0123 = vacc0x0123;
    __m128i vacc3x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);
    size_t k = kc;
    // Main loop: consume K in chunks of 8 bytes (4 groups of 2).
    while (k >= 8 * sizeof(int8_t)) {
      // Load 8 int8 values per row and sign-extend to int16.
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
      a0 += 8;
      const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
      const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
      a1 += 8;
      const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
      const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
      a2 += 8;
      const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
      const __m128i vxa3 = _mm_cvtepi8_epi16(va3);
      a3 += 8;
      // Each 16-byte weight load covers 4 columns x 2 K values (as int16);
      // the matching K-pair of each row is broadcast via _mm_shuffle_epi32
      // and _mm_madd_epi16 produces 4 int32 dot products of int16 pairs.
      const __m128i vxb0 = _mm_load_si128((const __m128i*) w);
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      vacc1x0123 = _mm_add_epi32(vacc1x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      vacc2x0123 = _mm_add_epi32(vacc2x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      vacc3x0123 = _mm_add_epi32(vacc3x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      const __m128i vxb1 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 8));
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
      vacc1x0123 = _mm_add_epi32(vacc1x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
      vacc2x0123 = _mm_add_epi32(vacc2x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
      vacc3x0123 = _mm_add_epi32(vacc3x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
      const __m128i vxb2 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 16));
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
      vacc1x0123 = _mm_add_epi32(vacc1x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
      vacc2x0123 = _mm_add_epi32(vacc2x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
      vacc3x0123 = _mm_add_epi32(vacc3x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
      const __m128i vxb3 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 24));
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
      vacc1x0123 = _mm_add_epi32(vacc1x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
      vacc2x0123 = _mm_add_epi32(vacc2x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
      vacc3x0123 = _mm_add_epi32(vacc3x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
      w = (const void*) ((const int16_t*) w + 32);
      k -= 8 * sizeof(int8_t);
    }
    // Remainder: 2, 4, or 6 leftover K bytes (kc is a multiple of 2).
    // May read past the end of a* (kernel is declared XNN_OOB_READS).
    if (k != 0) {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
      a0 = (const int8_t*) ((uintptr_t) a0 + k);
      const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
      const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
      a1 = (const int8_t*) ((uintptr_t) a1 + k);
      const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
      const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
      a2 = (const int8_t*) ((uintptr_t) a2 + k);
      const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
      const __m128i vxa3 = _mm_cvtepi8_epi16(va3);
      a3 = (const int8_t*) ((uintptr_t) a3 + k);
      const __m128i vxb0 = _mm_load_si128((const __m128i*) w);
      w = (const void*) ((const int16_t*) w + 8);
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      vacc1x0123 = _mm_add_epi32(vacc1x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      vacc2x0123 = _mm_add_epi32(vacc2x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      vacc3x0123 = _mm_add_epi32(vacc3x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      if (k > 2 * sizeof(int8_t)) {
        const __m128i vxb1 = _mm_load_si128((const __m128i*) w);
        w = (const void*) ((const int16_t*) w + 8);
        vacc0x0123 = _mm_add_epi32(vacc0x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
        vacc1x0123 = _mm_add_epi32(vacc1x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
        vacc2x0123 = _mm_add_epi32(vacc2x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
        vacc3x0123 = _mm_add_epi32(vacc3x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
        if (k > 4 * sizeof(int8_t)) {
          const __m128i vxb2 = _mm_load_si128((const __m128i*) w);
          w = (const void*) ((const int16_t*) w + 8);
          vacc0x0123 = _mm_add_epi32(vacc0x0123,
            _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
          vacc1x0123 = _mm_add_epi32(vacc1x0123,
            _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
          vacc2x0123 = _mm_add_epi32(vacc2x0123,
            _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
          vacc3x0123 = _mm_add_epi32(vacc3x0123,
            _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
        }
      }
    }
    // fp32 requantization: int32 -> float, scale, clamp from above, round
    // back to int32, add output zero point with int16 saturation, pack to
    // int8, then clamp from below.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
    __m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123);
    const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
    vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale);
    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
    vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point);
    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
    vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123);
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point);
    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123);
    vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
    if (nc >= 4) {
      // Full 4-column store: one 32-bit lane per row; rewind A pointers by kc
      // so the same rows feed the next column group.
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
      unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
      unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3));
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
      a3 = (const int8_t*) ((uintptr_t) a3 - kc);
      nc -= 4;
    } else {
      // Partial store of the final 1-3 columns, 2 bytes then 1 byte per row.
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6));
        c3 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c0 = (int8_t) _mm_extract_epi8(vout, 0);
        *c1 = (int8_t) _mm_extract_epi8(vout, 4);
        *c2 = (int8_t) _mm_extract_epi8(vout, 8);
        *c3 = (int8_t) _mm_extract_epi8(vout, 12);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 10,519
| 40.254902
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x4c2-xw-minmax-fp32-sse2.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QS8 GEMM microkernel (4x4 tile), SSE2-only variant of the "xw" kernel:
// weights are stored pre-sign-extended as int16 (w offsets in int16 units,
// aligned _mm_load_si128 loads).  Because SSE2 lacks _mm_cvtepi8_epi16,
// the int8 inputs are widened with the unpack-then-arithmetic-shift trick;
// because it lacks _mm_max_epi8 and _mm_extract_epi32, the lower clamp is
// applied on int16 before packing and full-width stores use 32-bit lane
// rotation via _mm_shuffle_epi32.
//
// Arguments mirror the other 4x4c2 kernels: mr (1..4 rows), nc columns,
// kc reduction bytes, a/a_stride inputs, w packed (4 int32 biases per
// 4-column group, then int16 weights), c/cm_stride/cn_stride outputs,
// params fp32_sse2 requantization constants.
void xnn_qs8_gemm_xw_minmax_fp32_ukernel_4x4c2__sse2(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // Round K up to a multiple of 2 bytes to match the c2 packed-weight layout.
  kc = round_up_po2(kc, 2 * sizeof(int8_t));
  // Per-row pointers; rows past mr alias the previous row (redundant compute).
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }
  do {
    // Seed each row's accumulator with the 4 int32 column biases from w.
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    __m128i vacc1x0123 = vacc0x0123;
    __m128i vacc2x0123 = vacc0x0123;
    __m128i vacc3x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);
    size_t k = kc;
    // Main loop: consume K in chunks of 8 bytes (4 groups of 2).
    while (k >= 8 * sizeof(int8_t)) {
      // Sign-extend 8 int8 values to int16: duplicate each byte into both
      // halves of a 16-bit lane, then arithmetic-shift right by 8.
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
      a0 += 8;
      const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
      const __m128i vxa1 = _mm_srai_epi16(_mm_unpacklo_epi8(va1, va1), 8);
      a1 += 8;
      const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
      const __m128i vxa2 = _mm_srai_epi16(_mm_unpacklo_epi8(va2, va2), 8);
      a2 += 8;
      const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
      const __m128i vxa3 = _mm_srai_epi16(_mm_unpacklo_epi8(va3, va3), 8);
      a3 += 8;
      // Each 16-byte weight load covers 4 columns x 2 K values (as int16);
      // the matching K-pair of each row is broadcast via _mm_shuffle_epi32
      // and _mm_madd_epi16 produces 4 int32 dot products of int16 pairs.
      const __m128i vxb0 = _mm_load_si128((const __m128i*) w);
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      vacc1x0123 = _mm_add_epi32(vacc1x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      vacc2x0123 = _mm_add_epi32(vacc2x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      vacc3x0123 = _mm_add_epi32(vacc3x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      const __m128i vxb1 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 8));
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
      vacc1x0123 = _mm_add_epi32(vacc1x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
      vacc2x0123 = _mm_add_epi32(vacc2x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
      vacc3x0123 = _mm_add_epi32(vacc3x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
      const __m128i vxb2 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 16));
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
      vacc1x0123 = _mm_add_epi32(vacc1x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
      vacc2x0123 = _mm_add_epi32(vacc2x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
      vacc3x0123 = _mm_add_epi32(vacc3x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
      const __m128i vxb3 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 24));
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
      vacc1x0123 = _mm_add_epi32(vacc1x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
      vacc2x0123 = _mm_add_epi32(vacc2x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
      vacc3x0123 = _mm_add_epi32(vacc3x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
      w = (const void*) ((const int16_t*) w + 32);
      k -= 8 * sizeof(int8_t);
    }
    // Remainder: 2, 4, or 6 leftover K bytes (kc is a multiple of 2).
    // May read past the end of a* (kernel is declared XNN_OOB_READS).
    if (k != 0) {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
      a0 = (const int8_t*) ((uintptr_t) a0 + k);
      const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
      const __m128i vxa1 = _mm_srai_epi16(_mm_unpacklo_epi8(va1, va1), 8);
      a1 = (const int8_t*) ((uintptr_t) a1 + k);
      const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
      const __m128i vxa2 = _mm_srai_epi16(_mm_unpacklo_epi8(va2, va2), 8);
      a2 = (const int8_t*) ((uintptr_t) a2 + k);
      const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
      const __m128i vxa3 = _mm_srai_epi16(_mm_unpacklo_epi8(va3, va3), 8);
      a3 = (const int8_t*) ((uintptr_t) a3 + k);
      const __m128i vxb0 = _mm_load_si128((const __m128i*) w);
      w = (const void*) ((const int16_t*) w + 8);
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      vacc1x0123 = _mm_add_epi32(vacc1x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      vacc2x0123 = _mm_add_epi32(vacc2x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      vacc3x0123 = _mm_add_epi32(vacc3x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      if (k > 2 * sizeof(int8_t)) {
        const __m128i vxb1 = _mm_load_si128((const __m128i*) w);
        w = (const void*) ((const int16_t*) w + 8);
        vacc0x0123 = _mm_add_epi32(vacc0x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
        vacc1x0123 = _mm_add_epi32(vacc1x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
        vacc2x0123 = _mm_add_epi32(vacc2x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
        vacc3x0123 = _mm_add_epi32(vacc3x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
        if (k > 4 * sizeof(int8_t)) {
          const __m128i vxb2 = _mm_load_si128((const __m128i*) w);
          w = (const void*) ((const int16_t*) w + 8);
          vacc0x0123 = _mm_add_epi32(vacc0x0123,
            _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
          vacc1x0123 = _mm_add_epi32(vacc1x0123,
            _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
          vacc2x0123 = _mm_add_epi32(vacc2x0123,
            _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
          vacc3x0123 = _mm_add_epi32(vacc3x0123,
            _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
        }
      }
    }
    // fp32 requantization: int32 -> float, scale, clamp from above, round
    // back to int32, add output zero point with int16 saturation.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
    __m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123);
    const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
    vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale);
    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
    vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point);
    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
    vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123);
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point);
    // SSE2 has no _mm_max_epi8, so the lower clamp is applied on the int16
    // values before the final pack to int8.
    const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
    vacc01x0123 = _mm_max_epi16(vacc01x0123, voutput_min);
    vacc23x0123 = _mm_max_epi16(vacc23x0123, voutput_min);
    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123);
    if (nc >= 4) {
      // Full 4-column store: SSE2 lacks _mm_extract_epi32, so rotate the
      // 32-bit lanes into position and store via _mm_cvtsi128_si32; then
      // rewind A pointers by kc for the next column group.
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      vout = _mm_shuffle_epi32(vout, _MM_SHUFFLE(0, 3, 2, 1));
      unaligned_store_u32(c1, (uint32_t) _mm_cvtsi128_si32(vout));
      vout = _mm_shuffle_epi32(vout, _MM_SHUFFLE(0, 3, 2, 1));
      unaligned_store_u32(c2, (uint32_t) _mm_cvtsi128_si32(vout));
      vout = _mm_shuffle_epi32(vout, _MM_SHUFFLE(0, 3, 2, 1));
      unaligned_store_u32(c3, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
      a3 = (const int8_t*) ((uintptr_t) a3 - kc);
      nc -= 4;
    } else {
      // Partial store of the final 1-3 columns, 2 bytes then 1 byte per row
      // (single bytes extracted via 16-bit extracts since SSE2 has no
      // _mm_extract_epi8).
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6));
        c3 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c0 = (int8_t) _mm_cvtsi128_si32(vout);
        *c1 = (int8_t) _mm_extract_epi16(vout, 2);
        *c2 = (int8_t) _mm_extract_epi16(vout, 4);
        *c3 = (int8_t) _mm_extract_epi16(vout, 6);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 11,011
| 41.353846
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x4c2-xw-minmax-fp32-sse41.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gemm_xw_minmax_fp32_ukernel_4x4c2__sse41(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
__m128i vacc2x0123 = vacc0x0123;
__m128i vacc3x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
a2 += 8;
const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
const __m128i vxa3 = _mm_cvtepi8_epi16(va3);
a3 += 8;
const __m128i vxb0 = _mm_load_si128((const __m128i*) w);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
const __m128i vxb1 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 8));
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
const __m128i vxb2 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 16));
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
const __m128i vxb3 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 24));
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
w = (const void*) ((const int16_t*) w + 32);
k -= 8 * sizeof(int8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 = (const int8_t*) ((uintptr_t) a0 + k);
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
a1 = (const int8_t*) ((uintptr_t) a1 + k);
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
a2 = (const int8_t*) ((uintptr_t) a2 + k);
const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
const __m128i vxa3 = _mm_cvtepi8_epi16(va3);
a3 = (const int8_t*) ((uintptr_t) a3 + k);
const __m128i vxb0 = _mm_load_si128((const __m128i*) w);
w = (const void*) ((const int16_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
if (k > 2 * sizeof(int8_t)) {
const __m128i vxb1 = _mm_load_si128((const __m128i*) w);
w = (const void*) ((const int16_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
if (k > 4 * sizeof(int8_t)) {
const __m128i vxb2 = _mm_load_si128((const __m128i*) w);
w = (const void*) ((const int16_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
}
}
}
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
__m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
a2 = (const int8_t*) ((uintptr_t) a2 - kc);
a3 = (const int8_t*) ((uintptr_t) a3 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
c2 += 2;
unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6));
c3 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
*c1 = (int8_t) _mm_extract_epi8(vout, 4);
*c2 = (int8_t) _mm_extract_epi8(vout, 8);
*c3 = (int8_t) _mm_extract_epi8(vout, 12);
}
nc = 0;
}
} while (nc != 0);
}
| 10,521
| 40.262745
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x4c2-xw-minmax-fp32-wasmsimd-dot16x2.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2-wasmsimd-dot16x2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
// qs8 GEMM microkernel: MR=4 rows by NR=4 columns, K packed in pairs ("c2"),
// implemented with WAsm SIMD i32x4 dot products over i16x8 lanes.
// "xw" variant: the weights were pre-widened to int16 at packing time, so the
// inner loop loads them directly with no sign extension.
// Requantization: fp32 scaling using the magic-bias rounding trick.
//
// w points at packed weights: per-column-tile, 4 int32 biases followed by
// interleaved int16 weights. XNN_OOB_READS: activation loads may read up to a
// vector past `kc`; the extra lanes are multiplied by zero-padded weights.
void xnn_qs8_gemm_xw_minmax_fp32_ukernel_4x4c2__wasmsimd_dot16x2(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  kc = round_up_po2(kc, 2 * sizeof(int8_t));  // weights are packed in K-pairs
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    // Fewer rows than MR: alias the missing row onto the previous one so all
    // loads/stores stay in bounds (row 1 duplicates row 0's work).
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }

  do {  // per column-tile of 4 outputs
    // Initialize all 4 row accumulators from the packed int32 biases.
    v128_t vacc0x0123 = wasm_v128_load(w);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc2x0123 = vacc0x0123;
    v128_t vacc3x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);

    size_t k = kc;
    // Main loop: 8 K-bytes per iteration. Each 32-bit lane of vxa* holds a
    // pair of sign-extended activations; broadcasting lane i and taking the
    // i16x8 dot product with vxb<i> accumulates 2 K-elements for all 4
    // output columns at once.
    while (k >= 8 * sizeof(int8_t)) {
      const v128_t vxa0 = wasm_i16x8_load8x8((const v128_t*) a0);
      a0 += 8;
      const v128_t vxa1 = wasm_i16x8_load8x8((const v128_t*) a1);
      a1 += 8;
      const v128_t vxa2 = wasm_i16x8_load8x8((const v128_t*) a2);
      a2 += 8;
      const v128_t vxa3 = wasm_i16x8_load8x8((const v128_t*) a3);
      a3 += 8;

      const v128_t vxb0 = wasm_v128_load(w);  // weights already int16 ("xw")

      vacc0x0123 = wasm_i32x4_add(vacc0x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0));
      vacc1x0123 = wasm_i32x4_add(vacc1x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 0, 0, 0, 0), vxb0));
      vacc2x0123 = wasm_i32x4_add(vacc2x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 0, 0, 0, 0), vxb0));
      vacc3x0123 = wasm_i32x4_add(vacc3x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 0, 0, 0, 0), vxb0));
      const v128_t vxb1 = wasm_v128_load((const int16_t*) w + 8);

      vacc0x0123 = wasm_i32x4_add(vacc0x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1));
      vacc1x0123 = wasm_i32x4_add(vacc1x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 1, 1, 1, 1), vxb1));
      vacc2x0123 = wasm_i32x4_add(vacc2x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 1, 1, 1, 1), vxb1));
      vacc3x0123 = wasm_i32x4_add(vacc3x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 1, 1, 1, 1), vxb1));
      const v128_t vxb2 = wasm_v128_load((const int16_t*) w + 16);

      vacc0x0123 = wasm_i32x4_add(vacc0x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2));
      vacc1x0123 = wasm_i32x4_add(vacc1x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 2, 2, 2, 2), vxb2));
      vacc2x0123 = wasm_i32x4_add(vacc2x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 2, 2, 2, 2), vxb2));
      vacc3x0123 = wasm_i32x4_add(vacc3x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 2, 2, 2, 2), vxb2));
      const v128_t vxb3 = wasm_v128_load((const int16_t*) w + 24);

      vacc0x0123 = wasm_i32x4_add(vacc0x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 3, 3, 3, 3), vxb3));
      vacc1x0123 = wasm_i32x4_add(vacc1x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 3, 3, 3, 3), vxb3));
      vacc2x0123 = wasm_i32x4_add(vacc2x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 3, 3, 3, 3), vxb3));
      vacc3x0123 = wasm_i32x4_add(vacc3x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 3, 3, 3, 3), vxb3));

      w = (const void*) ((const int16_t*) w + 32);
      k -= 8 * sizeof(int8_t);
    }
    // Remainder: 2, 4 or 6 K-bytes left (kc is rounded up to a multiple of 2).
    // Loads may read past the row end (XNN_OOB_READS); only the valid lanes
    // are dotted with weights.
    if (k != 0) {
      const v128_t vxa0 = wasm_i16x8_load8x8(a0);
      a0 = (const int8_t*) ((uintptr_t) a0 + k);
      const v128_t vxa1 = wasm_i16x8_load8x8(a1);
      a1 = (const int8_t*) ((uintptr_t) a1 + k);
      const v128_t vxa2 = wasm_i16x8_load8x8(a2);
      a2 = (const int8_t*) ((uintptr_t) a2 + k);
      const v128_t vxa3 = wasm_i16x8_load8x8(a3);
      a3 = (const int8_t*) ((uintptr_t) a3 + k);

      const v128_t vxb0 = wasm_v128_load(w);
      w = (const void*) ((const int16_t*) w + 8);

      vacc0x0123 = wasm_i32x4_add(vacc0x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0));
      vacc1x0123 = wasm_i32x4_add(vacc1x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 0, 0, 0, 0), vxb0));
      vacc2x0123 = wasm_i32x4_add(vacc2x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 0, 0, 0, 0), vxb0));
      vacc3x0123 = wasm_i32x4_add(vacc3x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 0, 0, 0, 0), vxb0));

      if (k > 2 * sizeof(int8_t)) {
        const v128_t vxb1 = wasm_v128_load(w);
        w = (const void*) ((const int16_t*) w + 8);

        vacc0x0123 = wasm_i32x4_add(vacc0x0123,
          wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1));
        vacc1x0123 = wasm_i32x4_add(vacc1x0123,
          wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 1, 1, 1, 1), vxb1));
        vacc2x0123 = wasm_i32x4_add(vacc2x0123,
          wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 1, 1, 1, 1), vxb1));
        vacc3x0123 = wasm_i32x4_add(vacc3x0123,
          wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 1, 1, 1, 1), vxb1));

        if (k > 4 * sizeof(int8_t)) {
          const v128_t vxb2 = wasm_v128_load(w);
          w = (const void*) ((const int16_t*) w + 8);

          vacc0x0123 = wasm_i32x4_add(vacc0x0123,
            wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2));
          vacc1x0123 = wasm_i32x4_add(vacc1x0123,
            wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 2, 2, 2, 2), vxb2));
          vacc2x0123 = wasm_i32x4_add(vacc2x0123,
            wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 2, 2, 2, 2), vxb2));
          vacc3x0123 = wasm_i32x4_add(vacc3x0123,
            wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 2, 2, 2, 2), vxb2));
        }
      }
    }

    // Requantize: int32 accumulator -> float -> scale -> add magic bias.
    // Adding the magic bias places the integer result in the float mantissa
    // (round-to-nearest for free); the later integer subtraction removes the
    // bias and folds in the output zero point in one step.
    vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123);
    vacc1x0123 = wasm_f32x4_convert_i32x4(vacc1x0123);
    vacc2x0123 = wasm_f32x4_convert_i32x4(vacc2x0123);
    vacc3x0123 = wasm_f32x4_convert_i32x4(vacc3x0123);

    const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
    vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale);
    vacc1x0123 = wasm_f32x4_mul(vacc1x0123, vscale);
    vacc2x0123 = wasm_f32x4_mul(vacc2x0123, vscale);
    vacc3x0123 = wasm_f32x4_mul(vacc3x0123, vscale);

    const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
    vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias);
    vacc1x0123 = wasm_f32x4_add(vacc1x0123, vmagic_bias);
    vacc2x0123 = wasm_f32x4_add(vacc2x0123, vmagic_bias);
    vacc3x0123 = wasm_f32x4_add(vacc3x0123, vmagic_bias);

    // Integer max against the biased representation of the output minimum:
    // this is the lower output clamp.
    const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
    vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min);
    vacc1x0123 = wasm_i32x4_max(vacc1x0123, vmagic_min);
    vacc2x0123 = wasm_i32x4_max(vacc2x0123, vmagic_min);
    vacc3x0123 = wasm_i32x4_max(vacc3x0123, vmagic_min);

    // Remove the magic bias and add the output zero point in one subtraction.
    const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
    vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point);
    vacc1x0123 = wasm_i32x4_sub(vacc1x0123, vmagic_bias_less_output_zero_point);
    vacc2x0123 = wasm_i32x4_sub(vacc2x0123, vmagic_bias_less_output_zero_point);
    vacc3x0123 = wasm_i32x4_sub(vacc3x0123, vmagic_bias_less_output_zero_point);

    // Narrow int32 -> int16 -> int8 with saturation, then apply the upper clamp.
    v128_t vacc01x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc1x0123);
    v128_t vacc23x0123 = wasm_i16x8_narrow_i32x4(vacc2x0123, vacc3x0123);

    v128_t vout = wasm_i8x16_narrow_i16x8(vacc01x0123, vacc23x0123);

    const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
    vout = wasm_i8x16_min(vout, voutput_max);

    if (nc >= 4) {
      // Full 4-column tile: one 32-bit lane per output row.
      wasm_v128_store32_lane(c0, vout, 0);
      wasm_v128_store32_lane(c1, vout, 1);
      wasm_v128_store32_lane(c2, vout, 2);
      wasm_v128_store32_lane(c3, vout, 3);

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);

      // Rewind activation pointers for the next column tile.
      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
      a3 = (const int8_t*) ((uintptr_t) a3 - kc);

      nc -= 4;
    } else {
      // Partial tile: store 2 bytes then (optionally) 1 byte per row.
      if (nc & 2) {
        wasm_v128_store16_lane(c0, vout, 0);
        c0 += 2;
        wasm_v128_store16_lane(c1, vout, 2);
        c1 += 2;
        wasm_v128_store16_lane(c2, vout, 4);
        c2 += 2;
        wasm_v128_store16_lane(c3, vout, 6);
        c3 += 2;

        vout = wasm_u32x4_shr(vout, 16);  // shift next outputs into lane 0 of each row
      }
      if (nc & 1) {
        wasm_v128_store8_lane(c0, vout, 0);
        wasm_v128_store8_lane(c1, vout, 4);
        wasm_v128_store8_lane(c2, vout, 8);
        wasm_v128_store8_lane(c3, vout, 12);
      }

      nc = 0;
    }
  } while (nc != 0);
}
| 10,072
| 38.814229
| 134
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x4c2-xw-minmax-fp32-xop.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// qs8 GEMM microkernel: MR=4 rows by NR=4 columns, K packed in pairs ("c2"),
// using the AMD XOP _mm_maddd_epi16 instruction, which fuses
// multiply-add-accumulate (madd + add) into a single operation.
// "xw" variant: weights were pre-widened to int16 at packing time; only the
// activations need sign extension in the inner loop.
// Requantization: fp32 scaling via SSE4 float conversion and clamping.
void xnn_qs8_gemm_xw_minmax_fp32_ukernel_4x4c2__xop(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  kc = round_up_po2(kc, 2 * sizeof(int8_t));  // weights are packed in K-pairs
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    // Fewer rows than MR: alias the missing row onto the previous one so all
    // loads/stores stay in bounds.
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }

  do {  // per column-tile of 4 outputs
    // Initialize all 4 row accumulators from the packed int32 biases.
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    __m128i vacc1x0123 = vacc0x0123;
    __m128i vacc2x0123 = vacc0x0123;
    __m128i vacc3x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);

    size_t k = kc;
    // Main loop: 8 K-bytes per iteration. Each 32-bit lane of vxa* holds a
    // pair of sign-extended activations; broadcasting lane i with
    // _mm_shuffle_epi32 and madd'ing with vxb<i> accumulates 2 K-elements for
    // all 4 output columns at once.
    while (k >= 8 * sizeof(int8_t)) {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
      a0 += 8;
      const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
      const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
      a1 += 8;
      const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
      const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
      a2 += 8;
      const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
      const __m128i vxa3 = _mm_cvtepi8_epi16(va3);
      a3 += 8;

      const __m128i vxb0 = _mm_load_si128((const __m128i*) w);  // weights already int16 ("xw")

      vacc0x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc0x0123);
      vacc1x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc1x0123);
      vacc2x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc2x0123);
      vacc3x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc3x0123);
      const __m128i vxb1 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 8));

      vacc0x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc0x0123);
      vacc1x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc1x0123);
      vacc2x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc2x0123);
      vacc3x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc3x0123);
      const __m128i vxb2 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 16));

      vacc0x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc0x0123);
      vacc1x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc1x0123);
      vacc2x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc2x0123);
      vacc3x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc3x0123);
      const __m128i vxb3 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 24));

      vacc0x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc0x0123);
      vacc1x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc1x0123);
      vacc2x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc2x0123);
      vacc3x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc3x0123);

      w = (const void*) ((const int16_t*) w + 32);
      k -= 8 * sizeof(int8_t);
    }
    // Remainder: 2, 4 or 6 K-bytes left (kc is rounded up to a multiple of 2).
    // Loads may read past the row end (XNN_OOB_READS).
    if (k != 0) {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
      a0 = (const int8_t*) ((uintptr_t) a0 + k);
      const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
      const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
      a1 = (const int8_t*) ((uintptr_t) a1 + k);
      const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
      const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
      a2 = (const int8_t*) ((uintptr_t) a2 + k);
      const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
      const __m128i vxa3 = _mm_cvtepi8_epi16(va3);
      a3 = (const int8_t*) ((uintptr_t) a3 + k);

      const __m128i vxb0 = _mm_load_si128((const __m128i*) w);
      w = (const void*) ((const int16_t*) w + 8);

      vacc0x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc0x0123);
      vacc1x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc1x0123);
      vacc2x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc2x0123);
      vacc3x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc3x0123);

      if (k > 2 * sizeof(int8_t)) {
        const __m128i vxb1 = _mm_load_si128((const __m128i*) w);
        w = (const void*) ((const int16_t*) w + 8);

        vacc0x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc0x0123);
        vacc1x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc1x0123);
        vacc2x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc2x0123);
        vacc3x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc3x0123);

        if (k > 4 * sizeof(int8_t)) {
          const __m128i vxb2 = _mm_load_si128((const __m128i*) w);
          w = (const void*) ((const int16_t*) w + 8);

          vacc0x0123 = _mm_maddd_epi16(
            _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc0x0123);
          vacc1x0123 = _mm_maddd_epi16(
            _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc1x0123);
          vacc2x0123 = _mm_maddd_epi16(
            _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc2x0123);
          vacc3x0123 = _mm_maddd_epi16(
            _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc3x0123);
        }
      }
    }

    // Requantize: int32 accumulator -> float, multiply by per-tensor scale.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
    __m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123);

    const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
    vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale);

    // Upper clamp in float domain (before the zero point is added).
    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
    vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point);

    // Round-to-nearest conversion back to int32.
    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
    vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123);

    // Narrow to int16 with saturation and add the output zero point,
    // then narrow to int8 and apply the lower clamp.
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point);

    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123);

    vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));

    if (nc >= 4) {
      // Full 4-column tile: one 32-bit lane per output row.
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
      unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
      unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3));

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);

      // Rewind activation pointers for the next column tile.
      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
      a3 = (const int8_t*) ((uintptr_t) a3 - kc);

      nc -= 4;
    } else {
      // Partial tile: store 2 bytes then (optionally) 1 byte per row.
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6));
        c3 += 2;

        vout = _mm_srli_epi32(vout, 16);  // shift next outputs into lane 0 of each row
      }
      if (nc & 1) {
        *c0 = (int8_t) _mm_extract_epi8(vout, 0);
        *c1 = (int8_t) _mm_extract_epi8(vout, 4);
        *c2 = (int8_t) _mm_extract_epi8(vout, 8);
        *c3 = (int8_t) _mm_extract_epi8(vout, 12);
      }

      nc = 0;
    }
  } while (nc != 0);
}
| 10,208
| 38.416988
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x4c2s4-minmax-fp32-avx-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// qs8 GEMM microkernel: MR=4 rows by NR=4 columns, K packed in pairs with a
// 4-way shuffle rotation ("c2s4"), AVX variant with 128-bit weight loads
// ("ld128"). Instead of broadcasting activation lanes, the activation vector
// itself is rotated (shuffle 0,3,2,1) between madd steps, so each weight
// vector meets the matching pair of activations.
// kc is rounded up to 8, so the K loop always runs a whole number of 8-byte
// steps and no remainder path is needed.
void xnn_qs8_gemm_minmax_fp32_ukernel_4x4c2s4__avx_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  kc = round_up_po2(kc, 8 * sizeof(int8_t));  // whole 8-byte K steps only
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    // Fewer rows than MR: alias the missing row onto the previous one so all
    // loads/stores stay in bounds.
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }

  do {  // per column-tile of 4 outputs
    // Initialize all 4 row accumulators from the packed int32 biases.
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    __m128i vacc1x0123 = vacc0x0123;
    __m128i vacc2x0123 = vacc0x0123;
    __m128i vacc3x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);

    size_t k = kc;
    // K loop: 8 activation bytes per row per iteration, against 32 packed
    // int8 weight bytes, loaded as two 16-byte vectors (ld128).
    do {
      // Load and sign-extend 8 activations per row; vxa* are mutated
      // (rotated) across the 4 madd steps below.
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      __m128i vxa0 = _mm_cvtepi8_epi16(va0);
      a0 += 8;
      const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
      __m128i vxa1 = _mm_cvtepi8_epi16(va1);
      a1 += 8;
      const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
      __m128i vxa2 = _mm_cvtepi8_epi16(va2);
      a2 += 8;
      const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
      __m128i vxa3 = _mm_cvtepi8_epi16(va3);
      a3 += 8;

      // First 16 weight bytes: low 8 sign-extended via cvtepi8_epi16,
      // high 8 via unpackhi + arithmetic shift right by 8.
      const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
      const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
      const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);

      // After each madd, rotate the activation pairs one 32-bit lane
      // (shuffle 0,3,2,1) so the next weight vector lines up.
      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0));
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb0));
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
      vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb0));
      vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));

      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1));
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb1));
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
      vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb1));
      vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));

      // Second 16 weight bytes.
      const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16));
      const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
      const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);

      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2));
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb2));
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
      vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb2));
      vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));

      // Final step: no rotation needed afterwards.
      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb3));
      vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb3));

      w = (const void*) ((const int8_t*) w + 32);
      k -= 8 * sizeof(int8_t);
    } while (k != 0);

    // Requantize: int32 accumulator -> float, multiply by per-tensor scale.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
    __m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123);

    const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
    vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale);

    // Upper clamp in float domain (before the zero point is added).
    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
    vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point);

    // Round-to-nearest conversion back to int32.
    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
    vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123);

    // Narrow to int16 with saturation and add the output zero point,
    // then narrow to int8 and apply the lower clamp.
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point);

    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123);

    vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));

    if (nc >= 4) {
      // Full 4-column tile: one 32-bit lane per output row.
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
      unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
      unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3));

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);

      // Rewind activation pointers for the next column tile.
      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
      a3 = (const int8_t*) ((uintptr_t) a3 - kc);

      nc -= 4;
    } else {
      // Partial tile: store 2 bytes then (optionally) 1 byte per row.
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6));
        c3 += 2;

        vout = _mm_srli_epi32(vout, 16);  // shift next outputs into lane 0 of each row
      }
      if (nc & 1) {
        *c0 = (int8_t) _mm_extract_epi8(vout, 0);
        *c1 = (int8_t) _mm_extract_epi8(vout, 4);
        *c2 = (int8_t) _mm_extract_epi8(vout, 8);
        *c3 = (int8_t) _mm_extract_epi8(vout, 12);
      }

      nc = 0;
    }
  } while (nc != 0);
}
| 7,809
| 38.246231
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x4c2s4-minmax-fp32-avx-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_4x4c2s4__avx_ld64(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
__m128i vacc2x0123 = vacc0x0123;
__m128i vacc3x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
do {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
__m128i vxa1 = _mm_cvtepi8_epi16(va1);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
__m128i vxa2 = _mm_cvtepi8_epi16(va2);
a2 += 8;
const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
__m128i vxa3 = _mm_cvtepi8_epi16(va3);
a3 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb0));
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb0));
vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb1));
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb1));
vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb2));
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb2));
vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb3));
vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
} while (k != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
__m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
a2 = (const int8_t*) ((uintptr_t) a2 - kc);
a3 = (const int8_t*) ((uintptr_t) a3 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
c2 += 2;
unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6));
c3 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
*c1 = (int8_t) _mm_extract_epi8(vout, 4);
*c2 = (int8_t) _mm_extract_epi8(vout, 8);
*c3 = (int8_t) _mm_extract_epi8(vout, 12);
}
nc = 0;
}
} while (nc != 0);
}
| 7,923
| 38.422886
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x4c2s4-minmax-fp32-sse2-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QS8 GEMM microkernel producing a 4x4 output tile (MR=4, NR=4) from signed
// 8-bit activations and packed signed 8-bit weights, with fp32 requantization.
// SSE2 variant with 128-bit ("ld128") packed-weight loads.
//
// "c2s4" layout: each _mm_madd_epi16 consumes K in pairs of elements (c2),
// and the activation vector is rotated by one 32-bit lane between the four
// successive madds (s4), so a single 8-element activation load is reused
// against four consecutive weight slices.
//
// Arguments:
//   mr        - number of rows of A/C actually processed (1 <= mr <= 4)
//   nc        - number of output columns
//   kc        - number of K (reduction) elements; rounded up to 8 below
//   a         - activation matrix, a_stride = row stride in bytes
//   w         - packed weights: per 4-column group, 4 int32 biases followed
//               by int8 weights (32 bytes consumed per 8 K steps)
//   c         - output matrix; cm_stride = row stride, cn_stride = stride
//               between 4-column groups, both in bytes
//   params    - fp32 requantization constants (scale, output clamps,
//               output zero point)
void xnn_qs8_gemm_minmax_fp32_ukernel_4x4c2s4__sse2_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // The inner loop consumes K in steps of 8; the packed weights are padded
  // to match (XNN_OOB_READS: activation reads may run past kc).
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  // Rows beyond mr alias the previous row: their work is computed redundantly
  // and stored to the same place, keeping the hot loop branch-free.
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }
  do {  // per 4-column tile of C
    // Initialize all four row accumulators from the packed per-channel bias.
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    __m128i vacc1x0123 = vacc0x0123;
    __m128i vacc2x0123 = vacc0x0123;
    __m128i vacc3x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);
    size_t k = kc;
    do {
      // Load 8 int8 activations per row and sign-extend to 8 int16
      // (unpack with self then arithmetic shift right by 8 -- SSE2 idiom).
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
      a0 += 8;
      const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
      __m128i vxa1 = _mm_srai_epi16(_mm_unpacklo_epi8(va1, va1), 8);
      a1 += 8;
      const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
      __m128i vxa2 = _mm_srai_epi16(_mm_unpacklo_epi8(va2, va2), 8);
      a2 += 8;
      const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
      __m128i vxa3 = _mm_srai_epi16(_mm_unpacklo_epi8(va3, va3), 8);
      a3 += 8;
      // Load 16 weight bytes at once (ld128); sign-extend to two int16
      // vectors by interleaving with a computed sign mask.
      const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
      const __m128i vsb01 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb01);
      const __m128i vxb0 = _mm_unpacklo_epi8(vb01, vsb01);
      const __m128i vxb1 = _mm_unpackhi_epi8(vb01, vsb01);
      // madd multiplies int16 pairs and sums adjacent products into int32;
      // rotate the activation vector one 32-bit lane between weight slices.
      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0));
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb0));
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
      vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb0));
      vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1));
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb1));
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
      vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb1));
      vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
      // Second 16-byte weight slice of this K step.
      const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16));
      const __m128i vsb23 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb23);
      const __m128i vxb2 = _mm_unpacklo_epi8(vb23, vsb23);
      const __m128i vxb3 = _mm_unpackhi_epi8(vb23, vsb23);
      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2));
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb2));
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
      vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb2));
      vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
      // Last slice: no rotation needed afterwards.
      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb3));
      vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb3));
      w = (const void*) ((const int8_t*) w + 32);
      k -= 8 * sizeof(int8_t);
    } while (k != 0);
    // fp32 requantization: int32 accumulator -> float, multiply by the
    // per-tile scale, clamp the upper bound while still in float.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
    __m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123);
    const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
    vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale);
    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
    vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point);
    // Round back to int32, add the output zero point with int16 saturation,
    // clamp the lower bound in int16 (SSE2 lacks _mm_max_epi8), then pack
    // all 4 rows into one vector of 16 int8 results.
    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
    vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123);
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point);
    const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
    vacc01x0123 = _mm_max_epi16(vacc01x0123, voutput_min);
    vacc23x0123 = _mm_max_epi16(vacc23x0123, voutput_min);
    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123);
    if (nc >= 4) {
      // Full tile: each 32-bit lane of vout holds one row's 4 int8 outputs;
      // rotate the vector to bring each row's lane to position 0.
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      vout = _mm_shuffle_epi32(vout, _MM_SHUFFLE(0, 3, 2, 1));
      unaligned_store_u32(c1, (uint32_t) _mm_cvtsi128_si32(vout));
      vout = _mm_shuffle_epi32(vout, _MM_SHUFFLE(0, 3, 2, 1));
      unaligned_store_u32(c2, (uint32_t) _mm_cvtsi128_si32(vout));
      vout = _mm_shuffle_epi32(vout, _MM_SHUFFLE(0, 3, 2, 1));
      unaligned_store_u32(c3, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      // Rewind activation pointers to the start of each row for the next
      // column group (the inner loop advanced them by kc).
      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
      a3 = (const int8_t*) ((uintptr_t) a3 - kc);
      nc -= 4;
    } else {
      // Remainder tile: store 2 and/or 1 trailing columns per row.
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6));
        c3 += 2;
        // Shift the remaining byte of each row into the low position.
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c0 = (int8_t) _mm_cvtsi128_si32(vout);
        *c1 = (int8_t) _mm_extract_epi16(vout, 2);
        *c2 = (int8_t) _mm_extract_epi16(vout, 4);
        *c3 = (int8_t) _mm_extract_epi16(vout, 6);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 8,325
| 39.417476
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x4c2s4-minmax-fp32-sse2-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QS8 GEMM microkernel producing a 4x4 output tile (MR=4, NR=4) from signed
// 8-bit activations and packed signed 8-bit weights, with fp32 requantization.
// SSE2 variant with 64-bit ("ld64") packed-weight loads: each of the four
// weight slices per K step is loaded and sign-extended separately.
//
// "c2s4" layout: each _mm_madd_epi16 consumes K in pairs of elements (c2),
// and the activation vector is rotated by one 32-bit lane between the four
// successive madds (s4), so one 8-element activation load is reused against
// four consecutive weight slices.
//
// Arguments:
//   mr        - number of rows of A/C actually processed (1 <= mr <= 4)
//   nc        - number of output columns
//   kc        - number of K (reduction) elements; rounded up to 8 below
//   a         - activation matrix, a_stride = row stride in bytes
//   w         - packed weights: per 4-column group, 4 int32 biases followed
//               by int8 weights (32 bytes consumed per 8 K steps)
//   c         - output matrix; cm_stride = row stride, cn_stride = stride
//               between 4-column groups, both in bytes
//   params    - fp32 requantization constants (scale, output clamps,
//               output zero point)
void xnn_qs8_gemm_minmax_fp32_ukernel_4x4c2s4__sse2_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // The inner loop consumes K in steps of 8; the packed weights are padded
  // to match (XNN_OOB_READS: activation reads may run past kc).
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  // Rows beyond mr alias the previous row: their work is computed redundantly
  // and stored to the same place, keeping the hot loop branch-free.
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }
  do {  // per 4-column tile of C
    // Initialize all four row accumulators from the packed per-channel bias.
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    __m128i vacc1x0123 = vacc0x0123;
    __m128i vacc2x0123 = vacc0x0123;
    __m128i vacc3x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);
    size_t k = kc;
    do {
      // Load 8 int8 activations per row and sign-extend to 8 int16
      // (unpack with self then arithmetic shift right by 8 -- SSE2 idiom).
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
      a0 += 8;
      const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
      __m128i vxa1 = _mm_srai_epi16(_mm_unpacklo_epi8(va1, va1), 8);
      a1 += 8;
      const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
      __m128i vxa2 = _mm_srai_epi16(_mm_unpacklo_epi8(va2, va2), 8);
      a2 += 8;
      const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
      __m128i vxa3 = _mm_srai_epi16(_mm_unpacklo_epi8(va3, va3), 8);
      a3 += 8;
      // Weight slice 0: 8 int8 weights, sign-extended to int16.
      // madd multiplies int16 pairs and sums adjacent products into int32;
      // rotate the activation vector one 32-bit lane between weight slices.
      const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
      const __m128i vxb0 = _mm_srai_epi16(_mm_unpacklo_epi8(vb0, vb0), 8);
      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0));
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb0));
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
      vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb0));
      vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
      // Weight slice 1.
      const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
      const __m128i vxb1 = _mm_srai_epi16(_mm_unpacklo_epi8(vb1, vb1), 8);
      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1));
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb1));
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
      vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb1));
      vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
      // Weight slice 2.
      const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
      const __m128i vxb2 = _mm_srai_epi16(_mm_unpacklo_epi8(vb2, vb2), 8);
      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2));
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb2));
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
      vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb2));
      vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
      // Weight slice 3: last slice, no rotation needed afterwards.
      const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
      const __m128i vxb3 = _mm_srai_epi16(_mm_unpacklo_epi8(vb3, vb3), 8);
      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb3));
      vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb3));
      w = (const void*) ((const int8_t*) w + 32);
      k -= 8 * sizeof(int8_t);
    } while (k != 0);
    // fp32 requantization: int32 accumulator -> float, multiply by the
    // per-tile scale, clamp the upper bound while still in float.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
    __m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123);
    const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
    vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale);
    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
    vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point);
    // Round back to int32, add the output zero point with int16 saturation,
    // clamp the lower bound in int16 (SSE2 lacks _mm_max_epi8), then pack
    // all 4 rows into one vector of 16 int8 results.
    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
    vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123);
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point);
    const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
    vacc01x0123 = _mm_max_epi16(vacc01x0123, voutput_min);
    vacc23x0123 = _mm_max_epi16(vacc23x0123, voutput_min);
    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123);
    if (nc >= 4) {
      // Full tile: each 32-bit lane of vout holds one row's 4 int8 outputs;
      // rotate the vector to bring each row's lane to position 0.
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      vout = _mm_shuffle_epi32(vout, _MM_SHUFFLE(0, 3, 2, 1));
      unaligned_store_u32(c1, (uint32_t) _mm_cvtsi128_si32(vout));
      vout = _mm_shuffle_epi32(vout, _MM_SHUFFLE(0, 3, 2, 1));
      unaligned_store_u32(c2, (uint32_t) _mm_cvtsi128_si32(vout));
      vout = _mm_shuffle_epi32(vout, _MM_SHUFFLE(0, 3, 2, 1));
      unaligned_store_u32(c3, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      // Rewind activation pointers to the start of each row for the next
      // column group (the inner loop advanced them by kc).
      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
      a3 = (const int8_t*) ((uintptr_t) a3 - kc);
      nc -= 4;
    } else {
      // Remainder tile: store 2 and/or 1 trailing columns per row.
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6));
        c3 += 2;
        // Shift the remaining byte of each row into the low position.
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c0 = (int8_t) _mm_cvtsi128_si32(vout);
        *c1 = (int8_t) _mm_extract_epi16(vout, 2);
        *c2 = (int8_t) _mm_extract_epi16(vout, 4);
        *c3 = (int8_t) _mm_extract_epi16(vout, 6);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 8,415
| 39.854369
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x4c2s4-minmax-fp32-sse41-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QS8 GEMM microkernel producing a 4x4 output tile (MR=4, NR=4) from signed
// 8-bit activations and packed signed 8-bit weights, with fp32 requantization.
// SSE4.1 variant with 128-bit ("ld128") packed-weight loads: sign extension
// uses _mm_cvtepi8_epi16 and the final clamp uses _mm_max_epi8, both
// unavailable in plain SSE2.
//
// "c2s4" layout: each _mm_madd_epi16 consumes K in pairs of elements (c2),
// and the activation vector is rotated by one 32-bit lane between the four
// successive madds (s4), so one 8-element activation load is reused against
// four consecutive weight slices.
//
// Arguments:
//   mr        - number of rows of A/C actually processed (1 <= mr <= 4)
//   nc        - number of output columns
//   kc        - number of K (reduction) elements; rounded up to 8 below
//   a         - activation matrix, a_stride = row stride in bytes
//   w         - packed weights: per 4-column group, 4 int32 biases followed
//               by int8 weights (32 bytes consumed per 8 K steps)
//   c         - output matrix; cm_stride = row stride, cn_stride = stride
//               between 4-column groups, both in bytes
//   params    - fp32 requantization constants (scale, output clamps,
//               output zero point)
void xnn_qs8_gemm_minmax_fp32_ukernel_4x4c2s4__sse41_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // The inner loop consumes K in steps of 8; the packed weights are padded
  // to match (XNN_OOB_READS: activation reads may run past kc).
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  // Rows beyond mr alias the previous row: their work is computed redundantly
  // and stored to the same place, keeping the hot loop branch-free.
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }
  do {  // per 4-column tile of C
    // Initialize all four row accumulators from the packed per-channel bias.
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    __m128i vacc1x0123 = vacc0x0123;
    __m128i vacc2x0123 = vacc0x0123;
    __m128i vacc3x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);
    size_t k = kc;
    do {
      // Load 8 int8 activations per row and sign-extend to 8 int16.
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      __m128i vxa0 = _mm_cvtepi8_epi16(va0);
      a0 += 8;
      const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
      __m128i vxa1 = _mm_cvtepi8_epi16(va1);
      a1 += 8;
      const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
      __m128i vxa2 = _mm_cvtepi8_epi16(va2);
      a2 += 8;
      const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
      __m128i vxa3 = _mm_cvtepi8_epi16(va3);
      a3 += 8;
      // Load 16 weight bytes at once (ld128): cvtepi8_epi16 extends the low
      // half, unpackhi-with-self + arithmetic shift extends the high half.
      const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
      const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
      const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);
      // madd multiplies int16 pairs and sums adjacent products into int32;
      // rotate the activation vector one 32-bit lane between weight slices.
      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0));
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb0));
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
      vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb0));
      vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1));
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb1));
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
      vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb1));
      vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
      // Second 16-byte weight slice of this K step.
      const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16));
      const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
      const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);
      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2));
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb2));
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
      vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb2));
      vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
      // Last slice: no rotation needed afterwards.
      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb3));
      vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb3));
      w = (const void*) ((const int8_t*) w + 32);
      k -= 8 * sizeof(int8_t);
    } while (k != 0);
    // fp32 requantization: int32 accumulator -> float, multiply by the
    // per-tile scale, clamp the upper bound while still in float.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
    __m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123);
    const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
    vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale);
    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
    vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point);
    // Round back to int32, add the output zero point with int16 saturation,
    // pack all 4 rows into 16 int8 results, then clamp the lower bound in
    // int8 directly (_mm_max_epi8 is SSE4.1).
    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
    vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123);
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point);
    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123);
    vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
    if (nc >= 4) {
      // Full tile: 32-bit lane i of vout holds row i's 4 int8 outputs;
      // extract each lane directly (_mm_extract_epi32 is SSE4.1).
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
      unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
      unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3));
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      // Rewind activation pointers to the start of each row for the next
      // column group (the inner loop advanced them by kc).
      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
      a3 = (const int8_t*) ((uintptr_t) a3 - kc);
      nc -= 4;
    } else {
      // Remainder tile: store 2 and/or 1 trailing columns per row.
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6));
        c3 += 2;
        // Shift the remaining byte of each row into the low position.
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c0 = (int8_t) _mm_extract_epi8(vout, 0);
        *c1 = (int8_t) _mm_extract_epi8(vout, 4);
        *c2 = (int8_t) _mm_extract_epi8(vout, 8);
        *c3 = (int8_t) _mm_extract_epi8(vout, 12);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 7,811
| 38.256281
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x4c2s4-minmax-fp32-sse41-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_4x4c2s4__sse41_ld64(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
__m128i vacc2x0123 = vacc0x0123;
__m128i vacc3x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
do {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
__m128i vxa1 = _mm_cvtepi8_epi16(va1);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
__m128i vxa2 = _mm_cvtepi8_epi16(va2);
a2 += 8;
const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
__m128i vxa3 = _mm_cvtepi8_epi16(va3);
a3 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb0));
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb0));
vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb1));
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb1));
vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb2));
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb2));
vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb3));
vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
} while (k != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
__m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
a2 = (const int8_t*) ((uintptr_t) a2 - kc);
a3 = (const int8_t*) ((uintptr_t) a3 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
c2 += 2;
unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6));
c3 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
*c1 = (int8_t) _mm_extract_epi8(vout, 4);
*c2 = (int8_t) _mm_extract_epi8(vout, 8);
*c3 = (int8_t) _mm_extract_epi8(vout, 12);
}
nc = 0;
}
} while (nc != 0);
}
| 7,925
| 38.432836
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x4c2s4-minmax-fp32-wasmsimd-dot16x2-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2s4-wasmsimd-dot16x2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
// Computes a 4x4 tile of a signed-8-bit GEMM with fp32 ("magic bias")
// requantization, using WebAssembly SIMD i32x4.dot_i16x8 accumulation and
// 128-bit weight loads ("ld128").
//
//   mr        - number of valid rows of A/C (1..4); extra rows alias row mr-1
//   nc        - number of output columns remaining
//   kc        - reduction length in bytes (rounded up to 8 below)
//   a         - int8 input rows, a_stride bytes apart
//   w         - packed weights: a quad of int32 accumulator seeds (bias)
//               followed by int8 weight bytes; exact packing layout is
//               produced elsewhere - treated as opaque here
//   c         - int8 output, cm_stride between rows, cn_stride between tiles
//   params    - fp32_wasmsimd requantization constants
void xnn_qs8_gemm_minmax_fp32_ukernel_4x4c2s4__wasmsimd_dot16x2_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // Row pointer setup: when mr < 4 the trailing row pointers alias the last
  // valid row, so every load/store below stays in bounds (duplicated rows
  // simply redo the same work on the same memory).
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }
  // The inner loop consumes A and W in groups of 8 int8 elements per row;
  // the packed streams are expected to be padded to that multiple.
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  do {
    // Seed all four rows' column accumulators from the same int32x4 read
    // from the packed-weights stream (per-channel bias).
    v128_t vacc0x0123 = wasm_v128_load(w);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc2x0123 = vacc0x0123;
    v128_t vacc3x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);
    size_t k = kc;
    do {
      // Load 8 int8 elements of each A row, sign-extended to 8 x int16.
      v128_t vxa0 = wasm_i16x8_load8x8((const v128_t*) a0);
      a0 += 8;
      v128_t vxa1 = wasm_i16x8_load8x8((const v128_t*) a1);
      a1 += 8;
      v128_t vxa2 = wasm_i16x8_load8x8((const v128_t*) a2);
      a2 += 8;
      v128_t vxa3 = wasm_i16x8_load8x8((const v128_t*) a3);
      a3 += 8;
      // 128-bit weight load: 16 int8 weights at once, widened into two
      // int16x8 vectors (low and high halves).
      const v128_t vb01 = wasm_v128_load(w);
      const v128_t vxb0 = wasm_i16x8_extend_low_i8x16(vb01);
      const v128_t vxb1 = wasm_i16x8_extend_high_i8x16(vb01);
      // dot_i16x8 sums each adjacent pair of i16 products into an i32 lane,
      // so one instruction accumulates 2 K-steps across the 4 columns.
      vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb0));
      // Rotate the A vector left by one 32-bit lane (the "s4" shift of the
      // c2s4 layout) so the next dot product pairs the following two A
      // values with the next weight vector.
      vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
      vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb0));
      vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
      vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb0));
      vxa2 = wasm_v32x4_shuffle(vxa2, vxa2, 1, 2, 3, 4);
      vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(vxa3, vxb0));
      vxa3 = wasm_v32x4_shuffle(vxa3, vxa3, 1, 2, 3, 4);
      vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb1));
      vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
      vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb1));
      vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
      vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb1));
      vxa2 = wasm_v32x4_shuffle(vxa2, vxa2, 1, 2, 3, 4);
      vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(vxa3, vxb1));
      vxa3 = wasm_v32x4_shuffle(vxa3, vxa3, 1, 2, 3, 4);
      // Second 128-bit weight load covers the remaining 4 K-steps.
      const v128_t vb23 = wasm_v128_load((const int8_t*) w + 16);
      const v128_t vxb2 = wasm_i16x8_extend_low_i8x16(vb23);
      const v128_t vxb3 = wasm_i16x8_extend_high_i8x16(vb23);
      vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb2));
      vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
      vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb2));
      vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
      vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb2));
      vxa2 = wasm_v32x4_shuffle(vxa2, vxa2, 1, 2, 3, 4);
      vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(vxa3, vxb2));
      vxa3 = wasm_v32x4_shuffle(vxa3, vxa3, 1, 2, 3, 4);
      // Last dot product of the group: no further rotation needed.
      vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb3));
      vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb3));
      vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb3));
      vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(vxa3, vxb3));
      w = (const int8_t*) w + 32;
      k -= 8 * sizeof(int8_t);
    } while (k != 0);
    // Requantization, fp32 "magic bias" variant:
    // int32 -> float, scale, then add a magic constant that lands the value
    // in the low mantissa bits; an integer max enforces the lower clamp and
    // subtracting (magic_bias - output_zero_point) recovers the quantized
    // integer with the zero point already applied.
    vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123);
    vacc1x0123 = wasm_f32x4_convert_i32x4(vacc1x0123);
    vacc2x0123 = wasm_f32x4_convert_i32x4(vacc2x0123);
    vacc3x0123 = wasm_f32x4_convert_i32x4(vacc3x0123);
    const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
    vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale);
    vacc1x0123 = wasm_f32x4_mul(vacc1x0123, vscale);
    vacc2x0123 = wasm_f32x4_mul(vacc2x0123, vscale);
    vacc3x0123 = wasm_f32x4_mul(vacc3x0123, vscale);
    const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
    vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias);
    vacc1x0123 = wasm_f32x4_add(vacc1x0123, vmagic_bias);
    vacc2x0123 = wasm_f32x4_add(vacc2x0123, vmagic_bias);
    vacc3x0123 = wasm_f32x4_add(vacc3x0123, vmagic_bias);
    const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
    vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min);
    vacc1x0123 = wasm_i32x4_max(vacc1x0123, vmagic_min);
    vacc2x0123 = wasm_i32x4_max(vacc2x0123, vmagic_min);
    vacc3x0123 = wasm_i32x4_max(vacc3x0123, vmagic_min);
    const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
    vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point);
    vacc1x0123 = wasm_i32x4_sub(vacc1x0123, vmagic_bias_less_output_zero_point);
    vacc2x0123 = wasm_i32x4_sub(vacc2x0123, vmagic_bias_less_output_zero_point);
    vacc3x0123 = wasm_i32x4_sub(vacc3x0123, vmagic_bias_less_output_zero_point);
    // Narrow i32 -> i16 -> i8 with saturation, then apply the upper clamp.
    v128_t vacc01x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc1x0123);
    v128_t vacc23x0123 = wasm_i16x8_narrow_i32x4(vacc2x0123, vacc3x0123);
    v128_t vout = wasm_i8x16_narrow_i16x8(vacc01x0123, vacc23x0123);
    const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
    vout = wasm_i8x16_min(vout, voutput_max);
    if (nc >= 4) {
      // Full 4-column tile: one 32-bit lane per output row.
      wasm_v128_store32_lane(c0, vout, 0);
      wasm_v128_store32_lane(c1, vout, 1);
      wasm_v128_store32_lane(c2, vout, 2);
      wasm_v128_store32_lane(c3, vout, 3);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      // Rewind A pointers (they advanced by kc inside the k loop) for the
      // next column tile.
      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
      a3 = (const int8_t*) ((uintptr_t) a3 - kc);
      nc -= 4;
    } else {
      // Tail: store 2 and/or 1 remaining columns per row.
      if (nc & 2) {
        wasm_v128_store16_lane(c0, vout, 0);
        c0 += 2;
        wasm_v128_store16_lane(c1, vout, 2);
        c1 += 2;
        wasm_v128_store16_lane(c2, vout, 4);
        c2 += 2;
        wasm_v128_store16_lane(c3, vout, 6);
        c3 += 2;
        vout = wasm_u32x4_shr(vout, 16);  // shift next column into lane 0 of each row
      }
      if (nc & 1) {
        wasm_v128_store8_lane(c0, vout, 0);
        wasm_v128_store8_lane(c1, vout, 4);
        wasm_v128_store8_lane(c2, vout, 8);
        wasm_v128_store8_lane(c3, vout, 12);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 7,698
| 37.303483
| 134
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x4c2s4-minmax-fp32-wasmsimd-dot16x2-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2s4-wasmsimd-dot16x2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
// Computes a 4x4 tile of a signed-8-bit GEMM with fp32 ("magic bias")
// requantization, using WebAssembly SIMD i32x4.dot_i16x8 accumulation.
// "ld64" variant: each weight vector is fetched with a 64-bit load that
// sign-extends 8 int8 weights directly (cf. the ld128 variant, which loads
// 16 weights at once and splits them).  Same contract as the ld128 kernel.
void xnn_qs8_gemm_minmax_fp32_ukernel_4x4c2s4__wasmsimd_dot16x2_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // Rows beyond mr alias the previous valid row so all four row pointers
  // remain safe to dereference.
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }
  // Inner loop consumes 8 bytes of K per iteration; streams are padded.
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  do {
    // Seed the accumulators of every row from the packed int32 bias quad.
    v128_t vacc0x0123 = wasm_v128_load(w);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc2x0123 = vacc0x0123;
    v128_t vacc3x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);
    size_t k = kc;
    do {
      // Load 8 int8 elements per A row, sign-extended to int16x8.
      v128_t vxa0 = wasm_i16x8_load8x8((const v128_t*) a0);
      a0 += 8;
      v128_t vxa1 = wasm_i16x8_load8x8((const v128_t*) a1);
      a1 += 8;
      v128_t vxa2 = wasm_i16x8_load8x8((const v128_t*) a2);
      a2 += 8;
      v128_t vxa3 = wasm_i16x8_load8x8((const v128_t*) a3);
      a3 += 8;
      // 64-bit weight load: 8 int8 weights sign-extended to int16x8.
      const v128_t vxb0 = wasm_i16x8_load8x8(w);
      // dot_i16x8 sums adjacent i16 product pairs into 4 int32 lanes.
      vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb0));
      // Rotate A left by one 32-bit lane (the "s4" shift) between dots.
      vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
      vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb0));
      vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
      vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb0));
      vxa2 = wasm_v32x4_shuffle(vxa2, vxa2, 1, 2, 3, 4);
      vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(vxa3, vxb0));
      vxa3 = wasm_v32x4_shuffle(vxa3, vxa3, 1, 2, 3, 4);
      const v128_t vxb1 = wasm_i16x8_load8x8((const int8_t*) w + 8);
      vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb1));
      vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
      vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb1));
      vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
      vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb1));
      vxa2 = wasm_v32x4_shuffle(vxa2, vxa2, 1, 2, 3, 4);
      vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(vxa3, vxb1));
      vxa3 = wasm_v32x4_shuffle(vxa3, vxa3, 1, 2, 3, 4);
      const v128_t vxb2 = wasm_i16x8_load8x8((const int8_t*) w + 16);
      vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb2));
      vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
      vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb2));
      vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
      vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb2));
      vxa2 = wasm_v32x4_shuffle(vxa2, vxa2, 1, 2, 3, 4);
      vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(vxa3, vxb2));
      vxa3 = wasm_v32x4_shuffle(vxa3, vxa3, 1, 2, 3, 4);
      const v128_t vxb3 = wasm_i16x8_load8x8((const int8_t*) w + 24);
      // Final dot of the group: no trailing rotation needed.
      vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb3));
      vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb3));
      vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb3));
      vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(vxa3, vxb3));
      w = (const int8_t*) w + 32;
      k -= 8 * sizeof(int8_t);
    } while (k != 0);
    // fp32 "magic bias" requantization: convert to float, scale, add magic
    // bias (drops the value into the low mantissa bits), integer-max for the
    // lower clamp, then subtract (magic_bias - output_zero_point).
    vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123);
    vacc1x0123 = wasm_f32x4_convert_i32x4(vacc1x0123);
    vacc2x0123 = wasm_f32x4_convert_i32x4(vacc2x0123);
    vacc3x0123 = wasm_f32x4_convert_i32x4(vacc3x0123);
    const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
    vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale);
    vacc1x0123 = wasm_f32x4_mul(vacc1x0123, vscale);
    vacc2x0123 = wasm_f32x4_mul(vacc2x0123, vscale);
    vacc3x0123 = wasm_f32x4_mul(vacc3x0123, vscale);
    const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
    vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias);
    vacc1x0123 = wasm_f32x4_add(vacc1x0123, vmagic_bias);
    vacc2x0123 = wasm_f32x4_add(vacc2x0123, vmagic_bias);
    vacc3x0123 = wasm_f32x4_add(vacc3x0123, vmagic_bias);
    const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
    vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min);
    vacc1x0123 = wasm_i32x4_max(vacc1x0123, vmagic_min);
    vacc2x0123 = wasm_i32x4_max(vacc2x0123, vmagic_min);
    vacc3x0123 = wasm_i32x4_max(vacc3x0123, vmagic_min);
    const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
    vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point);
    vacc1x0123 = wasm_i32x4_sub(vacc1x0123, vmagic_bias_less_output_zero_point);
    vacc2x0123 = wasm_i32x4_sub(vacc2x0123, vmagic_bias_less_output_zero_point);
    vacc3x0123 = wasm_i32x4_sub(vacc3x0123, vmagic_bias_less_output_zero_point);
    // Saturating narrow i32 -> i16 -> i8, then the upper clamp.
    v128_t vacc01x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc1x0123);
    v128_t vacc23x0123 = wasm_i16x8_narrow_i32x4(vacc2x0123, vacc3x0123);
    v128_t vout = wasm_i8x16_narrow_i16x8(vacc01x0123, vacc23x0123);
    const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
    vout = wasm_i8x16_min(vout, voutput_max);
    if (nc >= 4) {
      // Full tile: one 32-bit lane of vout per output row.
      wasm_v128_store32_lane(c0, vout, 0);
      wasm_v128_store32_lane(c1, vout, 1);
      wasm_v128_store32_lane(c2, vout, 2);
      wasm_v128_store32_lane(c3, vout, 3);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      // Rewind A pointers for the next column tile.
      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
      a3 = (const int8_t*) ((uintptr_t) a3 - kc);
      nc -= 4;
    } else {
      // Tail: emit 2 and/or 1 remaining columns per row.
      if (nc & 2) {
        wasm_v128_store16_lane(c0, vout, 0);
        c0 += 2;
        wasm_v128_store16_lane(c1, vout, 2);
        c1 += 2;
        wasm_v128_store16_lane(c2, vout, 4);
        c2 += 2;
        wasm_v128_store16_lane(c3, vout, 6);
        c3 += 2;
        vout = wasm_u32x4_shr(vout, 16);  // bring next column into each row's low byte
      }
      if (nc & 1) {
        wasm_v128_store8_lane(c0, vout, 0);
        wasm_v128_store8_lane(c1, vout, 4);
        wasm_v128_store8_lane(c2, vout, 8);
        wasm_v128_store8_lane(c3, vout, 12);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 7,598
| 37.18593
| 134
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x4c2s4-minmax-fp32-xop-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// Computes a 4x4 tile of a signed-8-bit GEMM with fp32 requantization using
// AMD XOP _mm_maddd_epi16 (fused multiply-of-i16-pairs + i32 accumulate) and
// 128-bit weight loads ("ld128").
//
//   mr        - number of valid rows of A/C (1..4); extra rows alias row mr-1
//   nc        - output columns remaining
//   kc        - reduction length in bytes (rounded up to 8 below)
//   w         - packed weights: int32 bias quad followed by int8 weights
//   params    - fp32_sse4 requantization constants
void xnn_qs8_gemm_minmax_fp32_ukernel_4x4c2s4__xop_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // Inner loop consumes 8 bytes of K per iteration; streams are padded.
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  // Rows beyond mr alias the previous valid row, keeping every pointer safe.
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }
  do {
    // Seed all four rows' accumulators from the packed int32 bias quad.
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    __m128i vacc1x0123 = vacc0x0123;
    __m128i vacc2x0123 = vacc0x0123;
    __m128i vacc3x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);
    size_t k = kc;
    do {
      // Load 8 int8 elements per A row and sign-extend to 8 x int16.
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      __m128i vxa0 = _mm_cvtepi8_epi16(va0);
      a0 += 8;
      const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
      __m128i vxa1 = _mm_cvtepi8_epi16(va1);
      a1 += 8;
      const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
      __m128i vxa2 = _mm_cvtepi8_epi16(va2);
      a2 += 8;
      const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
      __m128i vxa3 = _mm_cvtepi8_epi16(va3);
      a3 += 8;
      // 128-bit weight load: low 8 bytes widened with cvtepi8_epi16, high 8
      // bytes widened by the unpack-high + arithmetic-shift-right-8 idiom.
      const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
      const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
      const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);
      // XOP maddd: multiply adjacent i16 pairs, sum, and add to the third
      // operand - one instruction per multiply-accumulate step.
      vacc0x0123 = _mm_maddd_epi16(vxa0, vxb0, vacc0x0123);
      // Rotate A right by one 32-bit lane (the "s4" shift of c2s4 packing)
      // so the next maddd pairs the following two A values.
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_maddd_epi16(vxa1, vxb0, vacc1x0123);
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_maddd_epi16(vxa2, vxb0, vacc2x0123);
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
      vacc3x0123 = _mm_maddd_epi16(vxa3, vxb0, vacc3x0123);
      vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
      vacc0x0123 = _mm_maddd_epi16(vxa0, vxb1, vacc0x0123);
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_maddd_epi16(vxa1, vxb1, vacc1x0123);
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_maddd_epi16(vxa2, vxb1, vacc2x0123);
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
      vacc3x0123 = _mm_maddd_epi16(vxa3, vxb1, vacc3x0123);
      vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
      // Second 128-bit weight load covers the remaining 4 K-steps.
      const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16));
      const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
      const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);
      vacc0x0123 = _mm_maddd_epi16(vxa0, vxb2, vacc0x0123);
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_maddd_epi16(vxa1, vxb2, vacc1x0123);
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_maddd_epi16(vxa2, vxb2, vacc2x0123);
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
      vacc3x0123 = _mm_maddd_epi16(vxa3, vxb2, vacc3x0123);
      vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
      // Final maddd of the group: no trailing rotation needed.
      vacc0x0123 = _mm_maddd_epi16(vxa0, vxb3, vacc0x0123);
      vacc1x0123 = _mm_maddd_epi16(vxa1, vxb3, vacc1x0123);
      vacc2x0123 = _mm_maddd_epi16(vxa2, vxb3, vacc2x0123);
      vacc3x0123 = _mm_maddd_epi16(vxa3, vxb3, vacc3x0123);
      w = (const void*) ((const int8_t*) w + 32);
      k -= 8 * sizeof(int8_t);
    } while (k != 0);
    // fp32 requantization: int32 -> float, multiply by scale, clamp above
    // (in float, against output_max - zero_point), round back to int32.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
    __m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123);
    const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
    vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale);
    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
    vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point);
    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
    vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123);
    // Pack to i16 with saturation, add the output zero point (saturating),
    // pack to i8, then apply the lower clamp.
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point);
    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123);
    vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
    if (nc >= 4) {
      // Full tile: one 32-bit lane of vout per output row.
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
      unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
      unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3));
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      // Rewind A pointers for the next column tile.
      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
      a3 = (const int8_t*) ((uintptr_t) a3 - kc);
      nc -= 4;
    } else {
      // Tail: emit 2 and/or 1 remaining columns per row.
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6));
        c3 += 2;
        vout = _mm_srli_epi32(vout, 16);  // bring next column into each row's low byte
      }
      if (nc & 1) {
        *c0 = (int8_t) _mm_extract_epi8(vout, 0);
        *c1 = (int8_t) _mm_extract_epi8(vout, 4);
        *c2 = (int8_t) _mm_extract_epi8(vout, 8);
        *c3 = (int8_t) _mm_extract_epi8(vout, 12);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 7,638
| 36.630542
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x4c2s4-minmax-fp32-xop-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// Computes a 4x4 tile of a signed-8-bit GEMM with fp32 requantization using
// AMD XOP _mm_maddd_epi16.  "ld64" variant: each group of 8 int8 weights is
// fetched with a 64-bit load and sign-extended separately (cf. the ld128
// variant, which loads 16 weights at once).  Same contract as ld128.
void xnn_qs8_gemm_minmax_fp32_ukernel_4x4c2s4__xop_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // Inner loop consumes 8 bytes of K per iteration; streams are padded.
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  // Rows beyond mr alias the previous valid row, keeping every pointer safe.
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }
  do {
    // Seed all four rows' accumulators from the packed int32 bias quad.
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    __m128i vacc1x0123 = vacc0x0123;
    __m128i vacc2x0123 = vacc0x0123;
    __m128i vacc3x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);
    size_t k = kc;
    do {
      // Load 8 int8 elements per A row and sign-extend to 8 x int16.
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      __m128i vxa0 = _mm_cvtepi8_epi16(va0);
      a0 += 8;
      const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
      __m128i vxa1 = _mm_cvtepi8_epi16(va1);
      a1 += 8;
      const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
      __m128i vxa2 = _mm_cvtepi8_epi16(va2);
      a2 += 8;
      const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
      __m128i vxa3 = _mm_cvtepi8_epi16(va3);
      a3 += 8;
      // 64-bit weight load + sign extension: 8 int8 weights -> int16x8.
      const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
      const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
      // XOP maddd: multiply adjacent i16 pairs, sum, and add to the third
      // operand - a fused multiply-accumulate into the int32 accumulators.
      vacc0x0123 = _mm_maddd_epi16(vxa0, vxb0, vacc0x0123);
      // Rotate A right by one 32-bit lane (the "s4" shift) between maddds.
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_maddd_epi16(vxa1, vxb0, vacc1x0123);
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_maddd_epi16(vxa2, vxb0, vacc2x0123);
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
      vacc3x0123 = _mm_maddd_epi16(vxa3, vxb0, vacc3x0123);
      vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
      const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
      vacc0x0123 = _mm_maddd_epi16(vxa0, vxb1, vacc0x0123);
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_maddd_epi16(vxa1, vxb1, vacc1x0123);
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_maddd_epi16(vxa2, vxb1, vacc2x0123);
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
      vacc3x0123 = _mm_maddd_epi16(vxa3, vxb1, vacc3x0123);
      vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
      const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
      vacc0x0123 = _mm_maddd_epi16(vxa0, vxb2, vacc0x0123);
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_maddd_epi16(vxa1, vxb2, vacc1x0123);
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_maddd_epi16(vxa2, vxb2, vacc2x0123);
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
      vacc3x0123 = _mm_maddd_epi16(vxa3, vxb2, vacc3x0123);
      vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
      const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);
      // Final maddd of the group: no trailing rotation needed.
      vacc0x0123 = _mm_maddd_epi16(vxa0, vxb3, vacc0x0123);
      vacc1x0123 = _mm_maddd_epi16(vxa1, vxb3, vacc1x0123);
      vacc2x0123 = _mm_maddd_epi16(vxa2, vxb3, vacc2x0123);
      vacc3x0123 = _mm_maddd_epi16(vxa3, vxb3, vacc3x0123);
      w = (const void*) ((const int8_t*) w + 32);
      k -= 8 * sizeof(int8_t);
    } while (k != 0);
    // fp32 requantization: int32 -> float, scale, float upper clamp against
    // (output_max - zero_point), round back to int32.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
    __m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123);
    const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
    vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale);
    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
    vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point);
    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
    vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123);
    // Pack to i16 with saturation, add the output zero point (saturating),
    // pack to i8, then apply the lower clamp.
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point);
    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123);
    vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
    if (nc >= 4) {
      // Full tile: one 32-bit lane of vout per output row.
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
      unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
      unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3));
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      // Rewind A pointers for the next column tile.
      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
      a3 = (const int8_t*) ((uintptr_t) a3 - kc);
      nc -= 4;
    } else {
      // Tail: emit 2 and/or 1 remaining columns per row.
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6));
        c3 += 2;
        vout = _mm_srli_epi32(vout, 16);  // bring next column into each row's low byte
      }
      if (nc & 1) {
        *c0 = (int8_t) _mm_extract_epi8(vout, 0);
        *c1 = (int8_t) _mm_extract_epi8(vout, 4);
        *c2 = (int8_t) _mm_extract_epi8(vout, 8);
        *c3 = (int8_t) _mm_extract_epi8(vout, 12);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 7,752
| 36.819512
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x4c2s4-xw-minmax-fp32-avx.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QS8 GEMM microkernel, MR=4 rows x NR=4 columns, "xw" weight layout
// (weights pre-extended to int16 at packing time), fp32 requantization,
// min/max-clamped int8 output. AVX-encoded SSE4.1 integer intrinsics.
// Computes C[0..mr)x[0..nc) += A * W with per-tile int32 bias, then
// scales, clamps, and narrows the accumulators to int8.
void xnn_qs8_gemm_xw_minmax_fp32_ukernel_4x4c2s4__avx(
    size_t mr,                     // rows of A/C to process (1..4)
    size_t nc,                     // output columns remaining
    size_t kc,                     // reduction dimension, in bytes of int8
    const int8_t* restrict a,      // activations: mr rows, a_stride apart
    size_t a_stride,
    const void* restrict w,        // packed weights: int32 bias x4, then int16 weights
    int8_t* restrict c,            // output: mr rows, cm_stride apart
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // The inner loop consumes 8 bytes of each A row per iteration; the packed
  // weights are sized to match, so round kc up to a multiple of 8.
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    // Row clamping: when mr < 4, unused rows alias the last valid row so they
    // redundantly recompute/overwrite it instead of touching out-of-bounds memory.
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }
  do {  // per 4-column output tile
    // Seed the int32 accumulators from the bias that leads each weight tile.
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    __m128i vacc1x0123 = vacc0x0123;
    __m128i vacc2x0123 = vacc0x0123;
    __m128i vacc3x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);
    size_t k = kc;
    do {
      // Load 8 int8 activations per row and sign-extend to int16.
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      __m128i vxa0 = _mm_cvtepi8_epi16(va0);
      a0 += 8;
      const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
      __m128i vxa1 = _mm_cvtepi8_epi16(va1);
      a1 += 8;
      const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
      __m128i vxa2 = _mm_cvtepi8_epi16(va2);
      a2 += 8;
      const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
      __m128i vxa3 = _mm_cvtepi8_epi16(va3);
      a3 += 8;
      // Four passes over the pre-extended int16 weights ("c2s4" layout):
      // each pass multiply-accumulates int16 pairs with _mm_madd_epi16, then
      // rotates the activation vector by one 32-bit lane (shuffle 0,3,2,1)
      // so the next pass pairs the next 2 activations with the next weights.
      const __m128i vxb0 = _mm_load_si128((const __m128i*) w);
      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0));
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb0));
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
      vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb0));
      vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128i vxb1 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 8));
      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1));
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb1));
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
      vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb1));
      vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128i vxb2 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 16));
      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2));
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb2));
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
      vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb2));
      vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128i vxb3 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 24));
      // Last pass: no rotation needed afterwards.
      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb3));
      vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb3));
      w = (const void*) ((const int16_t*) w + 32);
      k -= 8 * sizeof(int8_t);
    } while (k != 0);
    // fp32 requantization: int32 -> float, multiply by the per-tile scale,
    // clamp to (output_max - zero_point), then round back to int32.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
    __m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123);
    const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
    vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale);
    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
    vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point);
    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
    vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123);
    // Narrow with saturation: int32 -> int16 (adding the output zero point),
    // then int16 -> int8; finally apply the output min clamp on int8.
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point);
    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123);
    vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
    if (nc >= 4) {
      // Full tile: each 32-bit lane of vout holds one row's 4 int8 outputs.
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
      unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
      unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3));
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      // Rewind the A pointers to the start of each row for the next N tile.
      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
      a3 = (const int8_t*) ((uintptr_t) a3 - kc);
      nc -= 4;
    } else {
      // Remainder tile (nc in 1..3): store 2 bytes, then shift so the odd
      // trailing byte lands in lane 0 of each row's 32-bit group.
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6));
        c3 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c0 = (int8_t) _mm_extract_epi8(vout, 0);
        *c1 = (int8_t) _mm_extract_epi8(vout, 4);
        *c2 = (int8_t) _mm_extract_epi8(vout, 8);
        *c3 = (int8_t) _mm_extract_epi8(vout, 12);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 7,721
| 38.19797
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x4c2s4-xw-minmax-fp32-sse2.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QS8 GEMM microkernel, MR=4 x NR=4, "xw" (int16 pre-extended) weight layout,
// fp32 requantization, min/max-clamped int8 output. Baseline SSE2 variant:
// no _mm_cvtepi8_epi16 / _mm_extract_epi32 / _mm_max_epi8, so sign extension,
// the min clamp, and the stores use SSE2-compatible sequences instead.
void xnn_qs8_gemm_xw_minmax_fp32_ukernel_4x4c2s4__sse2(
    size_t mr,                     // rows of A/C to process (1..4)
    size_t nc,                     // output columns remaining
    size_t kc,                     // reduction dimension, in bytes of int8
    const int8_t* restrict a,      // activations: mr rows, a_stride apart
    size_t a_stride,
    const void* restrict w,        // packed weights: int32 bias x4, then int16 weights
    int8_t* restrict c,            // output: mr rows, cm_stride apart
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // The inner loop consumes 8 bytes of each A row per iteration.
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    // Row clamping: unused rows alias the last valid row.
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }
  do {  // per 4-column output tile
    // Seed the int32 accumulators from the bias that leads each weight tile.
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    __m128i vacc1x0123 = vacc0x0123;
    __m128i vacc2x0123 = vacc0x0123;
    __m128i vacc3x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);
    size_t k = kc;
    do {
      // SSE2 sign extension: interleave a byte with itself, then arithmetic
      // shift right by 8 to produce int16 lanes.
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
      a0 += 8;
      const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
      __m128i vxa1 = _mm_srai_epi16(_mm_unpacklo_epi8(va1, va1), 8);
      a1 += 8;
      const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
      __m128i vxa2 = _mm_srai_epi16(_mm_unpacklo_epi8(va2, va2), 8);
      a2 += 8;
      const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
      __m128i vxa3 = _mm_srai_epi16(_mm_unpacklo_epi8(va3, va3), 8);
      a3 += 8;
      // Four passes over the pre-extended int16 weights ("c2s4" layout):
      // _mm_madd_epi16 accumulates int16 pairs; the activation vector is
      // rotated by one 32-bit lane between passes.
      const __m128i vxb0 = _mm_load_si128((const __m128i*) w);
      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0));
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb0));
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
      vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb0));
      vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128i vxb1 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 8));
      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1));
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb1));
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
      vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb1));
      vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128i vxb2 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 16));
      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2));
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb2));
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
      vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb2));
      vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128i vxb3 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 24));
      // Last pass: no rotation needed afterwards.
      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb3));
      vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb3));
      w = (const void*) ((const int16_t*) w + 32);
      k -= 8 * sizeof(int8_t);
    } while (k != 0);
    // fp32 requantization: int32 -> float, scale, clamp to the output max,
    // then round back to int32.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
    __m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123);
    const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
    vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale);
    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
    vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point);
    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
    vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123);
    // Narrow with saturation and add the output zero point. SSE2 has no
    // _mm_max_epi8, so the output min clamp is applied while still in int16.
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point);
    const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
    vacc01x0123 = _mm_max_epi16(vacc01x0123, voutput_min);
    vacc23x0123 = _mm_max_epi16(vacc23x0123, voutput_min);
    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123);
    if (nc >= 4) {
      // Full tile: SSE2 lacks _mm_extract_epi32, so rotate vout by one 32-bit
      // lane between stores and always read lane 0.
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      vout = _mm_shuffle_epi32(vout, _MM_SHUFFLE(0, 3, 2, 1));
      unaligned_store_u32(c1, (uint32_t) _mm_cvtsi128_si32(vout));
      vout = _mm_shuffle_epi32(vout, _MM_SHUFFLE(0, 3, 2, 1));
      unaligned_store_u32(c2, (uint32_t) _mm_cvtsi128_si32(vout));
      vout = _mm_shuffle_epi32(vout, _MM_SHUFFLE(0, 3, 2, 1));
      unaligned_store_u32(c3, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      // Rewind the A pointers to the start of each row for the next N tile.
      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
      a3 = (const int8_t*) ((uintptr_t) a3 - kc);
      nc -= 4;
    } else {
      // Remainder tile (nc in 1..3): store 2 bytes, then shift so the odd
      // trailing byte lands at the bottom of each row's 32-bit group.
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6));
        c3 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        // SSE2 lacks _mm_extract_epi8; truncating casts pick the low byte.
        *c0 = (int8_t) _mm_cvtsi128_si32(vout);
        *c1 = (int8_t) _mm_extract_epi16(vout, 2);
        *c2 = (int8_t) _mm_extract_epi16(vout, 4);
        *c3 = (int8_t) _mm_extract_epi16(vout, 6);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 8,117
| 39.188119
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x4c2s4-xw-minmax-fp32-sse41.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QS8 GEMM microkernel, MR=4 x NR=4, "xw" (int16 pre-extended) weight layout,
// fp32 requantization, min/max-clamped int8 output. SSE4.1 variant: uses
// _mm_cvtepi8_epi16 for sign extension, _mm_max_epi8 for the min clamp, and
// _mm_extract_epi32/_mm_extract_epi8 for the stores.
void xnn_qs8_gemm_xw_minmax_fp32_ukernel_4x4c2s4__sse41(
    size_t mr,                     // rows of A/C to process (1..4)
    size_t nc,                     // output columns remaining
    size_t kc,                     // reduction dimension, in bytes of int8
    const int8_t* restrict a,      // activations: mr rows, a_stride apart
    size_t a_stride,
    const void* restrict w,        // packed weights: int32 bias x4, then int16 weights
    int8_t* restrict c,            // output: mr rows, cm_stride apart
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // The inner loop consumes 8 bytes of each A row per iteration.
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    // Row clamping: unused rows alias the last valid row.
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }
  do {  // per 4-column output tile
    // Seed the int32 accumulators from the bias that leads each weight tile.
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    __m128i vacc1x0123 = vacc0x0123;
    __m128i vacc2x0123 = vacc0x0123;
    __m128i vacc3x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);
    size_t k = kc;
    do {
      // Load 8 int8 activations per row and sign-extend to int16.
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      __m128i vxa0 = _mm_cvtepi8_epi16(va0);
      a0 += 8;
      const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
      __m128i vxa1 = _mm_cvtepi8_epi16(va1);
      a1 += 8;
      const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
      __m128i vxa2 = _mm_cvtepi8_epi16(va2);
      a2 += 8;
      const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
      __m128i vxa3 = _mm_cvtepi8_epi16(va3);
      a3 += 8;
      // Four passes over the pre-extended int16 weights ("c2s4" layout):
      // _mm_madd_epi16 accumulates int16 pairs; the activation vector is
      // rotated by one 32-bit lane between passes.
      const __m128i vxb0 = _mm_load_si128((const __m128i*) w);
      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0));
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb0));
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
      vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb0));
      vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128i vxb1 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 8));
      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1));
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb1));
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
      vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb1));
      vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128i vxb2 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 16));
      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2));
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb2));
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
      vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb2));
      vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128i vxb3 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 24));
      // Last pass: no rotation needed afterwards.
      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb3));
      vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb3));
      w = (const void*) ((const int16_t*) w + 32);
      k -= 8 * sizeof(int8_t);
    } while (k != 0);
    // fp32 requantization: int32 -> float, scale, clamp to the output max,
    // then round back to int32.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
    __m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123);
    const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
    vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale);
    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
    vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point);
    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
    vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123);
    // Narrow with saturation: int32 -> int16 (adding the output zero point),
    // then int16 -> int8; finally apply the output min clamp on int8.
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point);
    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123);
    vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
    if (nc >= 4) {
      // Full tile: each 32-bit lane of vout holds one row's 4 int8 outputs.
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
      unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
      unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3));
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      // Rewind the A pointers to the start of each row for the next N tile.
      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
      a3 = (const int8_t*) ((uintptr_t) a3 - kc);
      nc -= 4;
    } else {
      // Remainder tile (nc in 1..3): store 2 bytes, then shift so the odd
      // trailing byte lands in lane 0 of each row's 32-bit group.
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6));
        c3 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c0 = (int8_t) _mm_extract_epi8(vout, 0);
        *c1 = (int8_t) _mm_extract_epi8(vout, 4);
        *c2 = (int8_t) _mm_extract_epi8(vout, 8);
        *c3 = (int8_t) _mm_extract_epi8(vout, 12);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 7,723
| 38.208122
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x4c2s4-xw-minmax-fp32-xop.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QS8 GEMM microkernel, MR=4 x NR=4, "xw" (int16 pre-extended) weight layout,
// fp32 requantization, min/max-clamped int8 output. AMD XOP variant: the
// madd+add pair of the SSE4.1 kernel is fused into a single
// _mm_maddd_epi16 multiply-accumulate per pass.
void xnn_qs8_gemm_xw_minmax_fp32_ukernel_4x4c2s4__xop(
    size_t mr,                     // rows of A/C to process (1..4)
    size_t nc,                     // output columns remaining
    size_t kc,                     // reduction dimension, in bytes of int8
    const int8_t* restrict a,      // activations: mr rows, a_stride apart
    size_t a_stride,
    const void* restrict w,        // packed weights: int32 bias x4, then int16 weights
    int8_t* restrict c,            // output: mr rows, cm_stride apart
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // The inner loop consumes 8 bytes of each A row per iteration.
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    // Row clamping: unused rows alias the last valid row.
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }
  do {  // per 4-column output tile
    // Seed the int32 accumulators from the bias that leads each weight tile.
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    __m128i vacc1x0123 = vacc0x0123;
    __m128i vacc2x0123 = vacc0x0123;
    __m128i vacc3x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);
    size_t k = kc;
    do {
      // Load 8 int8 activations per row and sign-extend to int16.
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      __m128i vxa0 = _mm_cvtepi8_epi16(va0);
      a0 += 8;
      const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
      __m128i vxa1 = _mm_cvtepi8_epi16(va1);
      a1 += 8;
      const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
      __m128i vxa2 = _mm_cvtepi8_epi16(va2);
      a2 += 8;
      const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
      __m128i vxa3 = _mm_cvtepi8_epi16(va3);
      a3 += 8;
      // Four passes over the pre-extended int16 weights ("c2s4" layout):
      // _mm_maddd_epi16 fuses the pairwise int16 multiply-add with the int32
      // accumulation; the activation vector is rotated by one 32-bit lane
      // between passes.
      const __m128i vxb0 = _mm_load_si128((const __m128i*) w);
      vacc0x0123 = _mm_maddd_epi16(vxa0, vxb0, vacc0x0123);
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_maddd_epi16(vxa1, vxb0, vacc1x0123);
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_maddd_epi16(vxa2, vxb0, vacc2x0123);
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
      vacc3x0123 = _mm_maddd_epi16(vxa3, vxb0, vacc3x0123);
      vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128i vxb1 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 8));
      vacc0x0123 = _mm_maddd_epi16(vxa0, vxb1, vacc0x0123);
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_maddd_epi16(vxa1, vxb1, vacc1x0123);
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_maddd_epi16(vxa2, vxb1, vacc2x0123);
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
      vacc3x0123 = _mm_maddd_epi16(vxa3, vxb1, vacc3x0123);
      vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128i vxb2 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 16));
      vacc0x0123 = _mm_maddd_epi16(vxa0, vxb2, vacc0x0123);
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_maddd_epi16(vxa1, vxb2, vacc1x0123);
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_maddd_epi16(vxa2, vxb2, vacc2x0123);
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
      vacc3x0123 = _mm_maddd_epi16(vxa3, vxb2, vacc3x0123);
      vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128i vxb3 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 24));
      // Last pass: no rotation needed afterwards.
      vacc0x0123 = _mm_maddd_epi16(vxa0, vxb3, vacc0x0123);
      vacc1x0123 = _mm_maddd_epi16(vxa1, vxb3, vacc1x0123);
      vacc2x0123 = _mm_maddd_epi16(vxa2, vxb3, vacc2x0123);
      vacc3x0123 = _mm_maddd_epi16(vxa3, vxb3, vacc3x0123);
      w = (const void*) ((const int16_t*) w + 32);
      k -= 8 * sizeof(int8_t);
    } while (k != 0);
    // fp32 requantization: int32 -> float, scale, clamp to the output max,
    // then round back to int32.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
    __m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123);
    const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
    vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale);
    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
    vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point);
    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
    vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123);
    // Narrow with saturation: int32 -> int16 (adding the output zero point),
    // then int16 -> int8; finally apply the output min clamp on int8.
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point);
    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123);
    vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
    if (nc >= 4) {
      // Full tile: each 32-bit lane of vout holds one row's 4 int8 outputs.
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
      unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
      unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3));
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      // Rewind the A pointers to the start of each row for the next N tile.
      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
      a3 = (const int8_t*) ((uintptr_t) a3 - kc);
      nc -= 4;
    } else {
      // Remainder tile (nc in 1..3): store 2 bytes, then shift so the odd
      // trailing byte lands in lane 0 of each row's 32-bit group.
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6));
        c3 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c0 = (int8_t) _mm_extract_epi8(vout, 0);
        *c1 = (int8_t) _mm_extract_epi8(vout, 4);
        *c2 = (int8_t) _mm_extract_epi8(vout, 8);
        *c3 = (int8_t) _mm_extract_epi8(vout, 12);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 7,550
| 36.567164
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x8-minmax-rndnu-neon-mlal-lane-prfm.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/neon-mlal-lane.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/gemm.h>
#include <xnnpack/prefetch.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_4x8__neon_mlal_lane_prfm(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const int8_t* a0 = a;
int8_t* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc1x0123 = vacc0x0123;
int32x4_t vacc1x4567 = vacc0x4567;
int32x4_t vacc2x0123 = vacc0x0123;
int32x4_t vacc2x4567 = vacc0x4567;
int32x4_t vacc3x0123 = vacc0x0123;
int32x4_t vacc3x4567 = vacc0x4567;
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const int8x8_t va0 = vld1_s8(a0); a0 += 8;
const int16x8_t vxa0 = vmovl_s8(va0);
const int8x8_t va1 = vld1_s8(a1); a1 += 8;
const int16x8_t vxa1 = vmovl_s8(va1);
const int8x8_t va2 = vld1_s8(a2); a2 += 8;
const int16x8_t vxa2 = vmovl_s8(va2);
const int8x8_t va3 = vld1_s8(a3); a3 += 8;
const int16x8_t vxa3 = vmovl_s8(va3);
const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa1), 0);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa1), 0);
vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa2), 0);
vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa2), 0);
vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa3), 0);
vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa3), 0);
const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa1), 1);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa1), 1);
vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa2), 1);
vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa2), 1);
vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa3), 1);
vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa3), 1);
const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa1), 2);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa1), 2);
vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa2), 2);
vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa2), 2);
vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa3), 2);
vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa3), 2);
const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa1), 3);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa1), 3);
vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa2), 3);
vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa2), 3);
vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa3), 3);
vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa3), 3);
xnn_prefetch_to_l1((const int8_t*) w + 448);
const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa1), 0);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa1), 0);
vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa2), 0);
vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa2), 0);
vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa3), 0);
vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa3), 0);
const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa1), 1);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa1), 1);
vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa2), 1);
vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa2), 1);
vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa3), 1);
vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa3), 1);
const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa1), 2);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa1), 2);
vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa2), 2);
vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa2), 2);
vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa3), 2);
vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa3), 2);
const int8x8_t vb01234567c7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c7 = vmovl_s8(vb01234567c7);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa1), 3);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa1), 3);
vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa2), 3);
vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa2), 3);
vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa3), 3);
vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa3), 3);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int16x8_t vxa0 = vmovl_s8(va0);
const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
const int16x8_t vxa1 = vmovl_s8(va1);
const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
const int16x8_t vxa2 = vmovl_s8(va2);
const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k);
const int16x8_t vxa3 = vmovl_s8(va3);
const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa1), 0);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa1), 0);
vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa2), 0);
vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa2), 0);
vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa3), 0);
vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa3), 0);
if (k >= 2 * sizeof(int8_t)) {
const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa1), 1);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa1), 1);
vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa2), 1);
vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa2), 1);
vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa3), 1);
vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa3), 1);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa1), 2);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa1), 2);
vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa2), 2);
vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa2), 2);
vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa3), 2);
vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa3), 2);
if (k >= 4 * sizeof(int8_t)) {
const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa1), 3);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa1), 3);
vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa2), 3);
vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa2), 3);
vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa3), 3);
vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa3), 3);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa1), 0);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa1), 0);
vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa2), 0);
vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa2), 0);
vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa3), 0);
vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa3), 0);
if (k >= 6 * sizeof(int8_t)) {
const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa1), 1);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa1), 1);
vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa2), 1);
vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa2), 1);
vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa3), 1);
vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa3), 1);
if (k > 6 * sizeof(int8_t)) {
const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa1), 2);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa1), 2);
vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa2), 2);
vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa2), 2);
vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa3), 2);
vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa3), 2);
}
}
}
}
}
}
}
const int32x4_t vright_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);
vacc3x0123 = vqshlq_s32(vacc3x0123, vright_pre_shift);
vacc3x4567 = vqshlq_s32(vacc3x4567, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567);
int16x8_t vacc3x01234567 = vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);
int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567));
int16x8_t vacc3x01234567 = vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);
int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
#endif
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->rndnu_neon.output_min);
vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->rndnu_neon.output_max);
vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
a2 = (const int8_t*) ((uintptr_t) a2 - kc);
a3 = (const int8_t*) ((uintptr_t) a3 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
}
if (nc & 2) {
vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
}
if (nc & 1) {
vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
}
nc = 0;
}
} while (nc != 0);
}
| 24,307
| 60.383838
| 112
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x8-minmax-rndnu-neon-mlal-lane.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/neon-mlal-lane.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/gemm.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_4x8__neon_mlal_lane(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const int8_t* a0 = a;
int8_t* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc1x0123 = vacc0x0123;
int32x4_t vacc1x4567 = vacc0x4567;
int32x4_t vacc2x0123 = vacc0x0123;
int32x4_t vacc2x4567 = vacc0x4567;
int32x4_t vacc3x0123 = vacc0x0123;
int32x4_t vacc3x4567 = vacc0x4567;
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const int8x8_t va0 = vld1_s8(a0); a0 += 8;
const int16x8_t vxa0 = vmovl_s8(va0);
const int8x8_t va1 = vld1_s8(a1); a1 += 8;
const int16x8_t vxa1 = vmovl_s8(va1);
const int8x8_t va2 = vld1_s8(a2); a2 += 8;
const int16x8_t vxa2 = vmovl_s8(va2);
const int8x8_t va3 = vld1_s8(a3); a3 += 8;
const int16x8_t vxa3 = vmovl_s8(va3);
const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa1), 0);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa1), 0);
vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa2), 0);
vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa2), 0);
vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa3), 0);
vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa3), 0);
const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa1), 1);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa1), 1);
vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa2), 1);
vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa2), 1);
vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa3), 1);
vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa3), 1);
const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa1), 2);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa1), 2);
vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa2), 2);
vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa2), 2);
vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa3), 2);
vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa3), 2);
const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa1), 3);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa1), 3);
vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa2), 3);
vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa2), 3);
vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa3), 3);
vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa3), 3);
const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa1), 0);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa1), 0);
vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa2), 0);
vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa2), 0);
vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa3), 0);
vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa3), 0);
const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa1), 1);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa1), 1);
vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa2), 1);
vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa2), 1);
vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa3), 1);
vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa3), 1);
const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa1), 2);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa1), 2);
vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa2), 2);
vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa2), 2);
vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa3), 2);
vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa3), 2);
const int8x8_t vb01234567c7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c7 = vmovl_s8(vb01234567c7);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa1), 3);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa1), 3);
vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa2), 3);
vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa2), 3);
vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa3), 3);
vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa3), 3);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int16x8_t vxa0 = vmovl_s8(va0);
const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
const int16x8_t vxa1 = vmovl_s8(va1);
const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
const int16x8_t vxa2 = vmovl_s8(va2);
const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k);
const int16x8_t vxa3 = vmovl_s8(va3);
const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa1), 0);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa1), 0);
vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa2), 0);
vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa2), 0);
vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa3), 0);
vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa3), 0);
if (k >= 2 * sizeof(int8_t)) {
const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa1), 1);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa1), 1);
vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa2), 1);
vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa2), 1);
vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa3), 1);
vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa3), 1);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa1), 2);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa1), 2);
vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa2), 2);
vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa2), 2);
vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa3), 2);
vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa3), 2);
if (k >= 4 * sizeof(int8_t)) {
const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa1), 3);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa1), 3);
vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa2), 3);
vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa2), 3);
vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa3), 3);
vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa3), 3);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa1), 0);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa1), 0);
vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa2), 0);
vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa2), 0);
vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa3), 0);
vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa3), 0);
if (k >= 6 * sizeof(int8_t)) {
const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa1), 1);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa1), 1);
vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa2), 1);
vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa2), 1);
vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa3), 1);
vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa3), 1);
if (k > 6 * sizeof(int8_t)) {
const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa1), 2);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa1), 2);
vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa2), 2);
vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa2), 2);
vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa3), 2);
vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa3), 2);
}
}
}
}
}
}
}
const int32x4_t vright_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);
vacc3x0123 = vqshlq_s32(vacc3x0123, vright_pre_shift);
vacc3x4567 = vqshlq_s32(vacc3x4567, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567);
int16x8_t vacc3x01234567 = vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);
int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567));
int16x8_t vacc3x01234567 = vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);
int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
#endif
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->rndnu_neon.output_min);
vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->rndnu_neon.output_max);
vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
a2 = (const int8_t*) ((uintptr_t) a2 - kc);
a3 = (const int8_t*) ((uintptr_t) a3 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
}
if (nc & 2) {
vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
}
if (nc & 1) {
vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
}
nc = 0;
}
} while (nc != 0);
}
| 24,221
| 60.477157
| 112
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x8c16-minmax-rndnu-neon-mlal.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c16-neon-mlal.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
// QS8 GEMM microkernel: computes a 4-row x 8-column tile of C = A*B + bias for
// signed 8-bit inputs, using NEON widening multiply-accumulate (MLAL) over
// K-blocks of 16 bytes, followed by "rndnu" (rounding-to-nearest-up) fixed-point
// requantization and saturating narrowing to int8 with min/max clamping.
//
// Arguments:
//   mr        - number of rows of A/C actually processed (1..4)
//   nc        - number of columns of C remaining (decremented by 8 per pass)
//   kc        - number of K elements per row (rounded up to a multiple of 16 below;
//               XNN_OOB_READS on the declaration indicates reads past kc are tolerated)
//   a/a_stride- input matrix A and its row stride in bytes
//   w         - packed weights: per 8-column group, 8 int32 biases followed by
//               interleaved 16-byte int8 weight vectors
//   c/cm_stride/cn_stride - output matrix C, row stride, and column-group stride
//   params    - requantization parameters (rndnu_neon variant)
void xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c16__neon_mlal(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // The K loop consumes 16 int8 elements per iteration, so round kc up to 16.
  kc = round_up_po2(kc, 16 * sizeof(int8_t));
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  // Row clamping: when mr < 4, alias the missing rows to the previous row so
  // the kernel always computes 4 rows but stores duplicates harmlessly.
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }

  do {
    // Load the 8 per-channel int32 biases from the packed weights into lane 0
    // of 8 separate accumulators (one accumulator vector per output channel;
    // the other lanes start at zero and collect partial sums).
    int32x4_t vacc0x0 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x1 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x2 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x3 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x4 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x5 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x6 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x7 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    // Rows 1-3 start from the same biases.
    int32x4_t vacc1x0 = vacc0x0;
    int32x4_t vacc1x1 = vacc0x1;
    int32x4_t vacc1x2 = vacc0x2;
    int32x4_t vacc1x3 = vacc0x3;
    int32x4_t vacc1x4 = vacc0x4;
    int32x4_t vacc1x5 = vacc0x5;
    int32x4_t vacc1x6 = vacc0x6;
    int32x4_t vacc1x7 = vacc0x7;
    int32x4_t vacc2x0 = vacc0x0;
    int32x4_t vacc2x1 = vacc0x1;
    int32x4_t vacc2x2 = vacc0x2;
    int32x4_t vacc2x3 = vacc0x3;
    int32x4_t vacc2x4 = vacc0x4;
    int32x4_t vacc2x5 = vacc0x5;
    int32x4_t vacc2x6 = vacc0x6;
    int32x4_t vacc2x7 = vacc0x7;
    int32x4_t vacc3x0 = vacc0x0;
    int32x4_t vacc3x1 = vacc0x1;
    int32x4_t vacc3x2 = vacc0x2;
    int32x4_t vacc3x3 = vacc0x3;
    int32x4_t vacc3x4 = vacc0x4;
    int32x4_t vacc3x5 = vacc0x5;
    int32x4_t vacc3x6 = vacc0x6;
    int32x4_t vacc3x7 = vacc0x7;

    // KC loop of 16
    // Per iteration: load 16 activations per row and 16 weights per channel,
    // form int16 products via vmull_s8 (low 8 bytes) + vmlal_s8 (high 8 bytes),
    // then pairwise-widen-accumulate into the int32 accumulators (vpadalq_s16).
    size_t k = kc;
    while (k != 0) {
      const int8x16_t va0 = vld1q_s8(a0); a0 += 16;
      const int8x16_t va1 = vld1q_s8(a1); a1 += 16;
      const int8x16_t va2 = vld1q_s8(a2); a2 += 16;
      const int8x16_t va3 = vld1q_s8(a3); a3 += 16;

      const int8x16_t vb0 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
      const int8x16_t vb1 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
      const int8x16_t vb2 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
      const int8x16_t vb3 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
      const int8x16_t vb4 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
      const int8x16_t vb5 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
      const int8x16_t vb6 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
      const int8x16_t vb7 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));

      // Channel 0: 4 rows x 16 K-elements.
      int16x8_t vprod0x0 = vmull_s8(vget_low_s8(vb0), vget_low_s8(va0));
      int16x8_t vprod1x0 = vmull_s8(vget_low_s8(vb0), vget_low_s8(va1));
      int16x8_t vprod2x0 = vmull_s8(vget_low_s8(vb0), vget_low_s8(va2));
      int16x8_t vprod3x0 = vmull_s8(vget_low_s8(vb0), vget_low_s8(va3));
      vprod0x0 = vmlal_s8(vprod0x0, vget_high_s8(vb0), vget_high_s8(va0));
      vprod1x0 = vmlal_s8(vprod1x0, vget_high_s8(vb0), vget_high_s8(va1));
      vprod2x0 = vmlal_s8(vprod2x0, vget_high_s8(vb0), vget_high_s8(va2));
      vprod3x0 = vmlal_s8(vprod3x0, vget_high_s8(vb0), vget_high_s8(va3));
      vacc0x0 = vpadalq_s16(vacc0x0, vprod0x0);
      vacc1x0 = vpadalq_s16(vacc1x0, vprod1x0);
      vacc2x0 = vpadalq_s16(vacc2x0, vprod2x0);
      vacc3x0 = vpadalq_s16(vacc3x0, vprod3x0);
      // Channel 1.
      int16x8_t vprod0x1 = vmull_s8(vget_low_s8(vb1), vget_low_s8(va0));
      int16x8_t vprod1x1 = vmull_s8(vget_low_s8(vb1), vget_low_s8(va1));
      int16x8_t vprod2x1 = vmull_s8(vget_low_s8(vb1), vget_low_s8(va2));
      int16x8_t vprod3x1 = vmull_s8(vget_low_s8(vb1), vget_low_s8(va3));
      vprod0x1 = vmlal_s8(vprod0x1, vget_high_s8(vb1), vget_high_s8(va0));
      vprod1x1 = vmlal_s8(vprod1x1, vget_high_s8(vb1), vget_high_s8(va1));
      vprod2x1 = vmlal_s8(vprod2x1, vget_high_s8(vb1), vget_high_s8(va2));
      vprod3x1 = vmlal_s8(vprod3x1, vget_high_s8(vb1), vget_high_s8(va3));
      vacc0x1 = vpadalq_s16(vacc0x1, vprod0x1);
      vacc1x1 = vpadalq_s16(vacc1x1, vprod1x1);
      vacc2x1 = vpadalq_s16(vacc2x1, vprod2x1);
      vacc3x1 = vpadalq_s16(vacc3x1, vprod3x1);
      // Channel 2.
      int16x8_t vprod0x2 = vmull_s8(vget_low_s8(vb2), vget_low_s8(va0));
      int16x8_t vprod1x2 = vmull_s8(vget_low_s8(vb2), vget_low_s8(va1));
      int16x8_t vprod2x2 = vmull_s8(vget_low_s8(vb2), vget_low_s8(va2));
      int16x8_t vprod3x2 = vmull_s8(vget_low_s8(vb2), vget_low_s8(va3));
      vprod0x2 = vmlal_s8(vprod0x2, vget_high_s8(vb2), vget_high_s8(va0));
      vprod1x2 = vmlal_s8(vprod1x2, vget_high_s8(vb2), vget_high_s8(va1));
      vprod2x2 = vmlal_s8(vprod2x2, vget_high_s8(vb2), vget_high_s8(va2));
      vprod3x2 = vmlal_s8(vprod3x2, vget_high_s8(vb2), vget_high_s8(va3));
      vacc0x2 = vpadalq_s16(vacc0x2, vprod0x2);
      vacc1x2 = vpadalq_s16(vacc1x2, vprod1x2);
      vacc2x2 = vpadalq_s16(vacc2x2, vprod2x2);
      vacc3x2 = vpadalq_s16(vacc3x2, vprod3x2);
      // Channel 3.
      int16x8_t vprod0x3 = vmull_s8(vget_low_s8(vb3), vget_low_s8(va0));
      int16x8_t vprod1x3 = vmull_s8(vget_low_s8(vb3), vget_low_s8(va1));
      int16x8_t vprod2x3 = vmull_s8(vget_low_s8(vb3), vget_low_s8(va2));
      int16x8_t vprod3x3 = vmull_s8(vget_low_s8(vb3), vget_low_s8(va3));
      vprod0x3 = vmlal_s8(vprod0x3, vget_high_s8(vb3), vget_high_s8(va0));
      vprod1x3 = vmlal_s8(vprod1x3, vget_high_s8(vb3), vget_high_s8(va1));
      vprod2x3 = vmlal_s8(vprod2x3, vget_high_s8(vb3), vget_high_s8(va2));
      vprod3x3 = vmlal_s8(vprod3x3, vget_high_s8(vb3), vget_high_s8(va3));
      vacc0x3 = vpadalq_s16(vacc0x3, vprod0x3);
      vacc1x3 = vpadalq_s16(vacc1x3, vprod1x3);
      vacc2x3 = vpadalq_s16(vacc2x3, vprod2x3);
      vacc3x3 = vpadalq_s16(vacc3x3, vprod3x3);
      // Channel 4.
      int16x8_t vprod0x4 = vmull_s8(vget_low_s8(vb4), vget_low_s8(va0));
      int16x8_t vprod1x4 = vmull_s8(vget_low_s8(vb4), vget_low_s8(va1));
      int16x8_t vprod2x4 = vmull_s8(vget_low_s8(vb4), vget_low_s8(va2));
      int16x8_t vprod3x4 = vmull_s8(vget_low_s8(vb4), vget_low_s8(va3));
      vprod0x4 = vmlal_s8(vprod0x4, vget_high_s8(vb4), vget_high_s8(va0));
      vprod1x4 = vmlal_s8(vprod1x4, vget_high_s8(vb4), vget_high_s8(va1));
      vprod2x4 = vmlal_s8(vprod2x4, vget_high_s8(vb4), vget_high_s8(va2));
      vprod3x4 = vmlal_s8(vprod3x4, vget_high_s8(vb4), vget_high_s8(va3));
      vacc0x4 = vpadalq_s16(vacc0x4, vprod0x4);
      vacc1x4 = vpadalq_s16(vacc1x4, vprod1x4);
      vacc2x4 = vpadalq_s16(vacc2x4, vprod2x4);
      vacc3x4 = vpadalq_s16(vacc3x4, vprod3x4);
      // Channel 5.
      int16x8_t vprod0x5 = vmull_s8(vget_low_s8(vb5), vget_low_s8(va0));
      int16x8_t vprod1x5 = vmull_s8(vget_low_s8(vb5), vget_low_s8(va1));
      int16x8_t vprod2x5 = vmull_s8(vget_low_s8(vb5), vget_low_s8(va2));
      int16x8_t vprod3x5 = vmull_s8(vget_low_s8(vb5), vget_low_s8(va3));
      vprod0x5 = vmlal_s8(vprod0x5, vget_high_s8(vb5), vget_high_s8(va0));
      vprod1x5 = vmlal_s8(vprod1x5, vget_high_s8(vb5), vget_high_s8(va1));
      vprod2x5 = vmlal_s8(vprod2x5, vget_high_s8(vb5), vget_high_s8(va2));
      vprod3x5 = vmlal_s8(vprod3x5, vget_high_s8(vb5), vget_high_s8(va3));
      vacc0x5 = vpadalq_s16(vacc0x5, vprod0x5);
      vacc1x5 = vpadalq_s16(vacc1x5, vprod1x5);
      vacc2x5 = vpadalq_s16(vacc2x5, vprod2x5);
      vacc3x5 = vpadalq_s16(vacc3x5, vprod3x5);
      // Channel 6.
      int16x8_t vprod0x6 = vmull_s8(vget_low_s8(vb6), vget_low_s8(va0));
      int16x8_t vprod1x6 = vmull_s8(vget_low_s8(vb6), vget_low_s8(va1));
      int16x8_t vprod2x6 = vmull_s8(vget_low_s8(vb6), vget_low_s8(va2));
      int16x8_t vprod3x6 = vmull_s8(vget_low_s8(vb6), vget_low_s8(va3));
      vprod0x6 = vmlal_s8(vprod0x6, vget_high_s8(vb6), vget_high_s8(va0));
      vprod1x6 = vmlal_s8(vprod1x6, vget_high_s8(vb6), vget_high_s8(va1));
      vprod2x6 = vmlal_s8(vprod2x6, vget_high_s8(vb6), vget_high_s8(va2));
      vprod3x6 = vmlal_s8(vprod3x6, vget_high_s8(vb6), vget_high_s8(va3));
      vacc0x6 = vpadalq_s16(vacc0x6, vprod0x6);
      vacc1x6 = vpadalq_s16(vacc1x6, vprod1x6);
      vacc2x6 = vpadalq_s16(vacc2x6, vprod2x6);
      vacc3x6 = vpadalq_s16(vacc3x6, vprod3x6);
      // Channel 7.
      int16x8_t vprod0x7 = vmull_s8(vget_low_s8(vb7), vget_low_s8(va0));
      int16x8_t vprod1x7 = vmull_s8(vget_low_s8(vb7), vget_low_s8(va1));
      int16x8_t vprod2x7 = vmull_s8(vget_low_s8(vb7), vget_low_s8(va2));
      int16x8_t vprod3x7 = vmull_s8(vget_low_s8(vb7), vget_low_s8(va3));
      vprod0x7 = vmlal_s8(vprod0x7, vget_high_s8(vb7), vget_high_s8(va0));
      vprod1x7 = vmlal_s8(vprod1x7, vget_high_s8(vb7), vget_high_s8(va1));
      vprod2x7 = vmlal_s8(vprod2x7, vget_high_s8(vb7), vget_high_s8(va2));
      vprod3x7 = vmlal_s8(vprod3x7, vget_high_s8(vb7), vget_high_s8(va3));
      vacc0x7 = vpadalq_s16(vacc0x7, vprod0x7);
      vacc1x7 = vpadalq_s16(vacc1x7, vprod1x7);
      vacc2x7 = vpadalq_s16(vacc2x7, vprod2x7);
      vacc3x7 = vpadalq_s16(vacc3x7, vprod3x7);

      k -= 16 * sizeof(int8_t);
    }

    // Horizontal reduction: collapse each per-channel 4-lane accumulator to a
    // single int32, packing channels 0-3 and 4-7 into one vector per group.
#if XNN_ARCH_ARM64
    // AArch64 has vpaddq_s32 for pairwise adds across two full vectors.
    const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1);
    const int32x4_t vsum0x23 = vpaddq_s32(vacc0x2, vacc0x3);
    const int32x4_t vsum0x45 = vpaddq_s32(vacc0x4, vacc0x5);
    const int32x4_t vsum0x67 = vpaddq_s32(vacc0x6, vacc0x7);
    const int32x4_t vsum1x01 = vpaddq_s32(vacc1x0, vacc1x1);
    const int32x4_t vsum1x23 = vpaddq_s32(vacc1x2, vacc1x3);
    const int32x4_t vsum1x45 = vpaddq_s32(vacc1x4, vacc1x5);
    const int32x4_t vsum1x67 = vpaddq_s32(vacc1x6, vacc1x7);
    const int32x4_t vsum2x01 = vpaddq_s32(vacc2x0, vacc2x1);
    const int32x4_t vsum2x23 = vpaddq_s32(vacc2x2, vacc2x3);
    const int32x4_t vsum2x45 = vpaddq_s32(vacc2x4, vacc2x5);
    const int32x4_t vsum2x67 = vpaddq_s32(vacc2x6, vacc2x7);
    const int32x4_t vsum3x01 = vpaddq_s32(vacc3x0, vacc3x1);
    const int32x4_t vsum3x23 = vpaddq_s32(vacc3x2, vacc3x3);
    const int32x4_t vsum3x45 = vpaddq_s32(vacc3x4, vacc3x5);
    const int32x4_t vsum3x67 = vpaddq_s32(vacc3x6, vacc3x7);
    int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23);
    int32x4_t vacc0x4567 = vpaddq_s32(vsum0x45, vsum0x67);
    int32x4_t vacc1x0123 = vpaddq_s32(vsum1x01, vsum1x23);
    int32x4_t vacc1x4567 = vpaddq_s32(vsum1x45, vsum1x67);
    int32x4_t vacc2x0123 = vpaddq_s32(vsum2x01, vsum2x23);
    int32x4_t vacc2x4567 = vpaddq_s32(vsum2x45, vsum2x67);
    int32x4_t vacc3x0123 = vpaddq_s32(vsum3x01, vsum3x23);
    int32x4_t vacc3x4567 = vpaddq_s32(vsum3x45, vsum3x67);
#else
    // AArch32 fallback: add high/low halves, then pairwise-add 64-bit halves.
    const int32x2_t vpsum0x0 = vadd_s32(vget_low_s32(vacc0x0), vget_high_s32(vacc0x0));
    const int32x2_t vpsum0x1 = vadd_s32(vget_low_s32(vacc0x1), vget_high_s32(vacc0x1));
    const int32x2_t vpsum0x2 = vadd_s32(vget_low_s32(vacc0x2), vget_high_s32(vacc0x2));
    const int32x2_t vpsum0x3 = vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3));
    const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1);
    const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3);
    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23 );
    const int32x2_t vpsum0x4 = vadd_s32(vget_low_s32(vacc0x4), vget_high_s32(vacc0x4));
    const int32x2_t vpsum0x5 = vadd_s32(vget_low_s32(vacc0x5), vget_high_s32(vacc0x5));
    const int32x2_t vpsum0x6 = vadd_s32(vget_low_s32(vacc0x6), vget_high_s32(vacc0x6));
    const int32x2_t vpsum0x7 = vadd_s32(vget_low_s32(vacc0x7), vget_high_s32(vacc0x7));
    const int32x2_t vsum0x45 = vpadd_s32(vpsum0x4, vpsum0x5);
    const int32x2_t vsum0x67 = vpadd_s32(vpsum0x6, vpsum0x7);
    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67 );
    const int32x2_t vpsum1x0 = vadd_s32(vget_low_s32(vacc1x0), vget_high_s32(vacc1x0));
    const int32x2_t vpsum1x1 = vadd_s32(vget_low_s32(vacc1x1), vget_high_s32(vacc1x1));
    const int32x2_t vpsum1x2 = vadd_s32(vget_low_s32(vacc1x2), vget_high_s32(vacc1x2));
    const int32x2_t vpsum1x3 = vadd_s32(vget_low_s32(vacc1x3), vget_high_s32(vacc1x3));
    const int32x2_t vsum1x01 = vpadd_s32(vpsum1x0, vpsum1x1);
    const int32x2_t vsum1x23 = vpadd_s32(vpsum1x2, vpsum1x3);
    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23 );
    const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));
    const int32x2_t vpsum1x5 = vadd_s32(vget_low_s32(vacc1x5), vget_high_s32(vacc1x5));
    const int32x2_t vpsum1x6 = vadd_s32(vget_low_s32(vacc1x6), vget_high_s32(vacc1x6));
    const int32x2_t vpsum1x7 = vadd_s32(vget_low_s32(vacc1x7), vget_high_s32(vacc1x7));
    const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);
    const int32x2_t vsum1x67 = vpadd_s32(vpsum1x6, vpsum1x7);
    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67 );
    const int32x2_t vpsum2x0 = vadd_s32(vget_low_s32(vacc2x0), vget_high_s32(vacc2x0));
    const int32x2_t vpsum2x1 = vadd_s32(vget_low_s32(vacc2x1), vget_high_s32(vacc2x1));
    const int32x2_t vpsum2x2 = vadd_s32(vget_low_s32(vacc2x2), vget_high_s32(vacc2x2));
    const int32x2_t vpsum2x3 = vadd_s32(vget_low_s32(vacc2x3), vget_high_s32(vacc2x3));
    const int32x2_t vsum2x01 = vpadd_s32(vpsum2x0, vpsum2x1);
    const int32x2_t vsum2x23 = vpadd_s32(vpsum2x2, vpsum2x3);
    int32x4_t vacc2x0123 = vcombine_s32(vsum2x01, vsum2x23 );
    const int32x2_t vpsum2x4 = vadd_s32(vget_low_s32(vacc2x4), vget_high_s32(vacc2x4));
    const int32x2_t vpsum2x5 = vadd_s32(vget_low_s32(vacc2x5), vget_high_s32(vacc2x5));
    const int32x2_t vpsum2x6 = vadd_s32(vget_low_s32(vacc2x6), vget_high_s32(vacc2x6));
    const int32x2_t vpsum2x7 = vadd_s32(vget_low_s32(vacc2x7), vget_high_s32(vacc2x7));
    const int32x2_t vsum2x45 = vpadd_s32(vpsum2x4, vpsum2x5);
    const int32x2_t vsum2x67 = vpadd_s32(vpsum2x6, vpsum2x7);
    int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67 );
    const int32x2_t vpsum3x0 = vadd_s32(vget_low_s32(vacc3x0), vget_high_s32(vacc3x0));
    const int32x2_t vpsum3x1 = vadd_s32(vget_low_s32(vacc3x1), vget_high_s32(vacc3x1));
    const int32x2_t vpsum3x2 = vadd_s32(vget_low_s32(vacc3x2), vget_high_s32(vacc3x2));
    const int32x2_t vpsum3x3 = vadd_s32(vget_low_s32(vacc3x3), vget_high_s32(vacc3x3));
    const int32x2_t vsum3x01 = vpadd_s32(vpsum3x0, vpsum3x1);
    const int32x2_t vsum3x23 = vpadd_s32(vpsum3x2, vpsum3x3);
    int32x4_t vacc3x0123 = vcombine_s32(vsum3x01, vsum3x23 );
    const int32x2_t vpsum3x4 = vadd_s32(vget_low_s32(vacc3x4), vget_high_s32(vacc3x4));
    const int32x2_t vpsum3x5 = vadd_s32(vget_low_s32(vacc3x5), vget_high_s32(vacc3x5));
    const int32x2_t vpsum3x6 = vadd_s32(vget_low_s32(vacc3x6), vget_high_s32(vacc3x6));
    const int32x2_t vpsum3x7 = vadd_s32(vget_low_s32(vacc3x7), vget_high_s32(vacc3x7));
    const int32x2_t vsum3x45 = vpadd_s32(vpsum3x4, vpsum3x5);
    const int32x2_t vsum3x67 = vpadd_s32(vpsum3x6, vpsum3x7);
    int32x4_t vacc3x4567 = vcombine_s32(vsum3x45, vsum3x67 );
#endif

    // "rndnu" requantization: saturating pre-shift, saturating doubling
    // high-half multiply, then rounding post-shift.
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
    vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);
    vacc3x0123 = vqshlq_s32(vacc3x0123, vright_pre_shift);
    vacc3x4567 = vqshlq_s32(vacc3x4567, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);

    // Saturating-narrow int32 -> int16 (adding the output zero point), then
    // int16 -> int8, packing two rows per 128-bit output vector.
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point)
#if XNN_ARCH_ARM64
    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
    int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
#else
    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
    int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
#endif

    // Clamp to the requested output range.
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);

    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
    vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);

    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
    vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);

    if (nc >= 8) {
      // Full 8-column store: low/high halves hold rows 0/1 and 2/3 respectively.
      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
      vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
      vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);

      // Rewind A pointers to the start of each row for the next column group.
      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
      a3 = (const int8_t*) ((uintptr_t) a3 - kc);

      nc -= 8;
    } else {
      // Final case where not all of the 8 columns fit in the destination.
      // Store 4/2/1 elements at a time, shifting consumed lanes out with vextq.
      if (nc & 4) {
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
        vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
      }
      if (nc & 2) {
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
        vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
      }
      if (nc & 1) {
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
      }

      nc = 0;
    }
  } while (nc != 0);
}
| 22,471
| 55.320802
| 130
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x8c2-minmax-rndnu-neon-mull-dup.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c2-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_dup(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc1x0123 = vacc0x0123;
int32x4_t vacc1x4567 = vacc0x4567;
int32x4_t vacc2x0123 = vacc0x0123;
int32x4_t vacc2x4567 = vacc0x4567;
int32x4_t vacc3x0123 = vacc0x0123;
int32x4_t vacc3x4567 = vacc0x4567;
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const int8x8_t va0 = vld1_s8(a0); a0 += 8;
const int8x8_t va1 = vld1_s8(a1); a1 += 8;
const int8x8_t va2 = vld1_s8(a2); a2 += 8;
const int8x8_t va3 = vld1_s8(a3); a3 += 8;
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
const int8x8_t va3c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
const int8x8_t va3c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
const int8x8_t va3c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
const int8x8_t va0c3 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 3));
const int8x8_t va1c3 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 3));
const int8x8_t va2c3 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 3));
const int8x8_t va3c3 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 3));
const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
const int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3, va3c3);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3);
const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
const int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3, va3c3);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k);
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
const int8x8_t va3c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 0));
const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
const int8x8_t va3c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 1));
const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
const int8x8_t va3c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 2));
const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
}
}
}
const int32x4_t vright_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);
vacc3x0123 = vqshlq_s32(vacc3x0123, vright_pre_shift);
vacc3x4567 = vqshlq_s32(vacc3x4567, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567);
int16x8_t vacc3x01234567 = vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);
int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567));
int16x8_t vacc3x01234567 = vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);
int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
#endif
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->rndnu_neon.output_min);
vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->rndnu_neon.output_max);
vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
a2 = (const int8_t*) ((uintptr_t) a2 - kc);
a3 = (const int8_t*) ((uintptr_t) a3 - kc);
nc -= 8;
} else {
// Final case where not all of the 8 columns fit in the destination.
if (nc & 4) {
vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
}
if (nc & 2) {
vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
}
if (nc & 1) {
vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
}
nc = 0;
}
} while (nc != 0);
}
| 20,167
| 53.069705
| 110
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x8c2-minmax-rndnu-neon-mull-ld1r.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c2-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
// QS8 GEMM microkernel: MR=4 rows x NR=8 columns, consuming K in pairs of
// int8 elements ("c2") with NEON VMULL; the "ld1r" variant loads each 2-byte
// A pair with vld1_dup_s16.  Requantization is the "rndnu" scheme: saturating
// pre-shift, saturating doubling-high multiply, rounding post-shift, then
// zero-point add, narrowing, and min/max clamping.
//
// Fix vs. previous revision: the `&params->rndnu_neon.*` operands had been
// corrupted by an encoding error into `¶ms->…`, which does not compile;
// the address-of expressions are restored below.
void xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // K is processed 2 bytes at a time; the weight packing pads to match.
  kc = round_up_po2(kc, 2 * sizeof(int8_t));
  // Row pointer setup: rows beyond `mr` alias the previous row so the kernel
  // can compute 4 rows unconditionally (extra rows write to duplicated c).
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }
  do {
    // Initialize accumulators with the packed per-column bias; all rows start
    // from the same bias values.
    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
    int32x4_t vacc1x0123 = vacc0x0123;
    int32x4_t vacc1x4567 = vacc0x4567;
    int32x4_t vacc2x0123 = vacc0x0123;
    int32x4_t vacc2x4567 = vacc0x4567;
    int32x4_t vacc3x0123 = vacc0x0123;
    int32x4_t vacc3x4567 = vacc0x4567;

    size_t k = kc;
    // Main loop: 8 bytes of K per iteration, as four 2-byte groups (c0..c3).
    while (k >= 8 * sizeof(int8_t)) {
      // Broadcast each 2-byte A pair across a 16-bit lane vector (ld1r).
      const int16x4_t va00 = vld1_dup_s16((const void*)a0);
      const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
      const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
      const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
      const int16x4_t va10 = vld1_dup_s16((const void*)a1);
      const int16x4_t va11 = vld1_dup_s16((const void*)(a1 + 2));
      const int16x4_t va12 = vld1_dup_s16((const void*)(a1 + 4));
      const int16x4_t va13 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
      const int16x4_t va20 = vld1_dup_s16((const void*)a2);
      const int16x4_t va21 = vld1_dup_s16((const void*)(a2 + 2));
      const int16x4_t va22 = vld1_dup_s16((const void*)(a2 + 4));
      const int16x4_t va23 = vld1_dup_s16((const void*)(a2 + 6)); a2 += 8;
      const int16x4_t va30 = vld1_dup_s16((const void*)a3);
      const int16x4_t va31 = vld1_dup_s16((const void*)(a3 + 2));
      const int16x4_t va32 = vld1_dup_s16((const void*)(a3 + 4));
      const int16x4_t va33 = vld1_dup_s16((const void*)(a3 + 6)); a3 += 8;

      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

      // Group c0: widening multiply then pairwise-add into 32-bit accumulators.
      const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
      const int8x8_t va1c0 = vreinterpret_s8_s16(va10);
      const int8x8_t va2c0 = vreinterpret_s8_s16(va20);
      const int8x8_t va3c0 = vreinterpret_s8_s16(va30);

      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
      const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
      const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
      const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
      const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
      // Group c1.
      const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
      const int8x8_t va1c1 = vreinterpret_s8_s16(va11);
      const int8x8_t va2c1 = vreinterpret_s8_s16(va21);
      const int8x8_t va3c1 = vreinterpret_s8_s16(va31);

      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
      const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
      const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
      const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
      const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
      const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
      const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
      // Group c2.
      const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
      const int8x8_t va1c2 = vreinterpret_s8_s16(va12);
      const int8x8_t va2c2 = vreinterpret_s8_s16(va22);
      const int8x8_t va3c2 = vreinterpret_s8_s16(va32);

      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
      const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
      const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
      const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
      const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
      const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
      const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
      // Group c3.
      const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
      const int8x8_t va1c3 = vreinterpret_s8_s16(va13);
      const int8x8_t va2c3 = vreinterpret_s8_s16(va23);
      const int8x8_t va3c3 = vreinterpret_s8_s16(va33);

      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
      const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
      const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
      const int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3, va3c3);
      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3);
      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
      const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
      const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
      const int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3, va3c3);
      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3);

      k -= 8 * sizeof(int8_t);
    }
    // Remainder: 2, 4, or 6 bytes of K left (kc is rounded up to 2).
    // XNN_OOB_READS permits the full 8-byte loads here.
    if XNN_UNLIKELY(k != 0) {
      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
      const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
      const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k);

      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
      const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
      const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
      const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
      const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
      const int8x8_t va3c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 0));
      const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
      const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);

      if (k > 2 * sizeof(int8_t)) {
        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
        const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
        const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
        const int8x8_t va3c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 1));
        const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
        const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);

        if (k > 4 * sizeof(int8_t)) {
          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

          const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
          const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
          const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
          const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
          const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
          const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
          const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
          const int8x8_t va3c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 2));
          const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
          vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
          const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
          vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
        }
      }
    }

    // Requantization ("rndnu"): saturating pre-shift, doubling-high multiply,
    // rounding post-shift.  NOTE: the `&params->…` expressions below were
    // previously mojibake (`¶ms`); restored here.
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
    vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);
    vacc3x0123 = vqshlq_s32(vacc3x0123, vright_pre_shift);
    vacc3x4567 = vqshlq_s32(vacc3x4567, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);

    // Narrow 32 -> 16 bits with saturation, add the output zero point, then
    // narrow 16 -> 8 bits.  AArch64 uses the *_high forms to save a combine.
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
    int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
    int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567);
    int16x8_t vacc3x01234567 = vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567);

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
    vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
    int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
#else
    int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
    int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
    int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567));
    int16x8_t vacc3x01234567 = vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567));

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
    vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
    int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
#endif

    // Clamp to [output_min, output_max].
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
    vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);
    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
    vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);

    if (nc >= 8) {
      // Full 8-column tile: store 8 bytes per row, advance C, rewind A.
      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
      vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
      vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);

      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
      a3 = (const int8_t*) ((uintptr_t) a3 - kc);

      nc -= 8;
    } else {
      // Final case where not all of the 8 columns fit in the destination:
      // store 4/2/1 bytes per row, shifting the vector between steps.
      if (nc & 4) {
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
        vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
      }
      if (nc & 2) {
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
        vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
      }
      if (nc & 1) {
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
      }

      nc = 0;
    }
  } while (nc != 0);
}
| 20,432
| 52.072727
| 110
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x8c2-minmax-rndnu-neon-mull-ld2r.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c2-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
// qs8 GEMM microkernel producing a 4-row x 8-column output tile of signed
// 8-bit values, using NEON vmull_s8 arithmetic and "rndnu" requantization
// (saturating pre-shift, doubling-high multiply, rounding post-shift).
// "c2" in the name: the K dimension is consumed 2 bytes at a time per lane;
// "ld2r": the main loop loads activations with vld2_dup (load 2-element
// structures and replicate across lanes).
// NOTE(review): auto-generated from src/qs8-gemm/c2-neon-mull-dup.c.in —
// do not hand-edit; regenerate via tools/xngen instead.
void xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r(
    size_t mr,                  // number of rows of A/C to process (1..4)
    size_t nc,                  // number of columns of C remaining
    size_t kc,                  // reduction (K) size in bytes
    const int8_t* restrict a,   // activations, mr rows with stride a_stride
    size_t a_stride,
    const void* restrict w,     // packed weights: per-tile bias then int8 weights
    int8_t* restrict c,         // output, mr rows with stride cm_stride
    size_t cm_stride,
    size_t cn_stride,           // byte advance of each c row per 8-column tile
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Weights are packed assuming K rounded up to a multiple of 2 bytes.
  kc = round_up_po2(kc, 2 * sizeof(int8_t));
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  // When mr < 4, alias the out-of-range row pointers to the previous row so
  // the kernel can unconditionally compute 4 rows; duplicated rows write to
  // the same memory and extra loads stay in-bounds.
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }

  do {
    // Initialize accumulators from the packed per-column bias (8 int32),
    // shared across all 4 rows.
    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
    int32x4_t vacc1x0123 = vacc0x0123;
    int32x4_t vacc1x4567 = vacc0x4567;
    int32x4_t vacc2x0123 = vacc0x0123;
    int32x4_t vacc2x4567 = vacc0x4567;
    int32x4_t vacc3x0123 = vacc0x0123;
    int32x4_t vacc3x4567 = vacc0x4567;

    size_t k = kc;

    // Main loop: 8 bytes of K per iteration, split into four 2-byte groups
    // (c0..c3). Each vld2_dup load broadcasts two 2-byte activation pairs
    // across all lanes; reinterpreting a 16-bit lane as int8x8 yields the
    // pair repeated 4 times, matching the c2 weight packing.
    while (k >= 8 * sizeof(int8_t)) {
      const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
      const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
      const int16x4x2_t va10 = vld2_dup_s16((const void*)a1);
      const int16x4x2_t va11 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
      const int16x4x2_t va20 = vld2_dup_s16((const void*)a2);
      const int16x4x2_t va21 = vld2_dup_s16((const void*)(a2 + 4)); a2 += 8;
      const int16x4x2_t va30 = vld2_dup_s16((const void*)a3);
      const int16x4x2_t va31 = vld2_dup_s16((const void*)(a3 + 4)); a3 += 8;

      // 8 weight vectors: {columns 0-3, columns 4-7} for each K group c0..c3.
      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

      // K group c0: widen-multiply int8 pairs to int16, then pairwise-add-
      // accumulate (vpadalq) so each int32 lane sums both K elements.
      const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
      const int8x8_t va1c0 = vreinterpret_s8_s16(va10.val[0]);
      const int8x8_t va2c0 = vreinterpret_s8_s16(va20.val[0]);
      const int8x8_t va3c0 = vreinterpret_s8_s16(va30.val[0]);

      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
      const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
      const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
      const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
      const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);

      // K group c1.
      const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
      const int8x8_t va1c1 = vreinterpret_s8_s16(va10.val[1]);
      const int8x8_t va2c1 = vreinterpret_s8_s16(va20.val[1]);
      const int8x8_t va3c1 = vreinterpret_s8_s16(va30.val[1]);

      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
      const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
      const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
      const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
      const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
      const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
      const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);

      // K group c2 (from the second vld2_dup load of each row).
      const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
      const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);
      const int8x8_t va2c2 = vreinterpret_s8_s16(va21.val[0]);
      const int8x8_t va3c2 = vreinterpret_s8_s16(va31.val[0]);

      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
      const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
      const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
      const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
      const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
      const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
      const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);

      // K group c3.
      const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
      const int8x8_t va1c3 = vreinterpret_s8_s16(va11.val[1]);
      const int8x8_t va2c3 = vreinterpret_s8_s16(va21.val[1]);
      const int8x8_t va3c3 = vreinterpret_s8_s16(va31.val[1]);

      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
      const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
      const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
      const int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3, va3c3);
      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3);
      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
      const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
      const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
      const int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3, va3c3);
      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3);

      k -= 8 * sizeof(int8_t);
    }

    // Remainder: k in {2, 4, 6} after the kc round-up. Loads one 8-byte
    // vector per row (XNN_OOB_READS permits reading past the valid tail) and
    // broadcasts successive 16-bit lanes per 2-byte K group via vdup_lane.
    if XNN_UNLIKELY(k != 0) {
      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
      const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
      const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k);

      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

      // K group c0 (always present when k != 0).
      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
      const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
      const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
      const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
      const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
      const int8x8_t va3c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 0));
      const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
      const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);

      // K group c1 (present when k >= 4 bytes).
      if (k > 2 * sizeof(int8_t)) {
        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
        const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
        const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
        const int8x8_t va3c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 1));
        const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
        const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);

        // K group c2 (present when k == 6 bytes).
        if (k > 4 * sizeof(int8_t)) {
          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

          const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
          const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
          const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
          const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
          const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
          const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
          const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
          const int8x8_t va3c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 2));
          const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
          vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
          const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
          vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
        }
      }
    }

    // rndnu requantization: saturating left shift (pre), saturating doubling
    // high multiply by the fixed-point multiplier, rounding right shift (post).
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
    vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);
    vacc3x0123 = vqshlq_s32(vacc3x0123, vright_pre_shift);
    vacc3x4567 = vqshlq_s32(vacc3x4567, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);

    // Narrow int32 -> int16 with saturation, add the output zero point, then
    // narrow int16 -> int8 with saturation. AArch64 uses the *_high forms to
    // fuse the two halves into one register.
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
    int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
    int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567);
    int16x8_t vacc3x01234567 = vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567);

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
    vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
    int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
#else
    int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
    int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
    int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567));
    int16x8_t vacc3x01234567 = vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567));

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
    vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
    int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
#endif

    // Clamp to the requested output range.
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
    vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);

    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
    vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);

    if (nc >= 8) {
      // Full 8-column store; rewind the a pointers by kc for the next tile.
      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
      vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
      vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride)
;
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);

      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
      a3 = (const int8_t*) ((uintptr_t) a3 - kc);

      nc -= 8;
    } else {
      // Final case where not all of the 8 columns fit in the destination.
      // Store 4/2/1-byte pieces, shifting the vectors down (vextq) between
      // steps; lane indices pick the per-row slice of each packed vector.
      if (nc & 4) {
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
        vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
      }
      if (nc & 2) {
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
        vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
      }
      if (nc & 1) {
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
      }

      nc = 0;
    }
  } while (nc != 0);
}
| 20,032
| 52.137931
| 110
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x8c2-minmax-rndnu-neon-mull-ld4r.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c2-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld4r(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc1x0123 = vacc0x0123;
int32x4_t vacc1x4567 = vacc0x4567;
int32x4_t vacc2x0123 = vacc0x0123;
int32x4_t vacc2x4567 = vacc0x4567;
int32x4_t vacc3x0123 = vacc0x0123;
int32x4_t vacc3x4567 = vacc0x4567;
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const int16x4x4_t va0 = vld4_dup_s16((const void*)a0); a0 += 8;
const int16x4x4_t va1 = vld4_dup_s16((const void*)a1); a1 += 8;
const int16x4x4_t va2 = vld4_dup_s16((const void*)a2); a2 += 8;
const int16x4x4_t va3 = vld4_dup_s16((const void*)a3); a3 += 8;
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(va0.val[0]);
const int8x8_t va1c0 = vreinterpret_s8_s16(va1.val[0]);
const int8x8_t va2c0 = vreinterpret_s8_s16(va2.val[0]);
const int8x8_t va3c0 = vreinterpret_s8_s16(va3.val[0]);
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
const int8x8_t va0c1 = vreinterpret_s8_s16(va0.val[1]);
const int8x8_t va1c1 = vreinterpret_s8_s16(va1.val[1]);
const int8x8_t va2c1 = vreinterpret_s8_s16(va2.val[1]);
const int8x8_t va3c1 = vreinterpret_s8_s16(va3.val[1]);
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
const int8x8_t va0c2 = vreinterpret_s8_s16(va0.val[2]);
const int8x8_t va1c2 = vreinterpret_s8_s16(va1.val[2]);
const int8x8_t va2c2 = vreinterpret_s8_s16(va2.val[2]);
const int8x8_t va3c2 = vreinterpret_s8_s16(va3.val[2]);
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
const int8x8_t va0c3 = vreinterpret_s8_s16(va0.val[3]);
const int8x8_t va1c3 = vreinterpret_s8_s16(va1.val[3]);
const int8x8_t va2c3 = vreinterpret_s8_s16(va2.val[3]);
const int8x8_t va3c3 = vreinterpret_s8_s16(va3.val[3]);
const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
const int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3, va3c3);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3);
const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
const int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3, va3c3);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k);
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
const int8x8_t va3c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 0));
const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
const int8x8_t va3c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 1));
const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
const int8x8_t va3c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 2));
const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
}
}
}
const int32x4_t vright_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);
vacc3x0123 = vqshlq_s32(vacc3x0123, vright_pre_shift);
vacc3x4567 = vqshlq_s32(vacc3x4567, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567);
int16x8_t vacc3x01234567 = vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);
int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567));
int16x8_t vacc3x01234567 = vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);
int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
#endif
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->rndnu_neon.output_min);
vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->rndnu_neon.output_max);
vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
a2 = (const int8_t*) ((uintptr_t) a2 - kc);
a3 = (const int8_t*) ((uintptr_t) a3 - kc);
nc -= 8;
} else {
// Final case where not all of the 8 columns fit in the destination.
if (nc & 4) {
vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
}
if (nc & 2) {
vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
}
if (nc & 1) {
vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
}
nc = 0;
}
} while (nc != 0);
}
| 19,740
| 51.924933
| 110
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x8c2s4-minmax-rndnu-neon-mlal.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c2-neon-mull-shuffle.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
// QS8 GEMM microkernel: 4 rows (MR=4) x 8 columns (NR=8), 2-element channels
// shuffled 4 ways (C2S4), NEON VMULL/VMLAL path with rndnu requantization.
// Computes c[4][8] = requantize(a[4][kc] * w) where w interleaves per-column
// int32 biases followed by packed int8 weights.
// NOTE(review): auto-generated from c2-neon-mull-shuffle.c.in — do not hand-edit logic.
void xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2s4__neon_mlal(
    size_t mr,                 // number of valid rows, 1..4
    size_t nc,                 // number of output columns remaining
    size_t kc,                 // reduction (channel) dimension in bytes
    const int8_t* restrict a,  // input rows, a_stride bytes apart
    size_t a_stride,
    const void* restrict w,    // packed weights: per-NR-group bias + int8 data
    int8_t* restrict c,        // output rows, cm_stride bytes apart
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  const int8_t* a0 = a;
  int8_t* c0 = c;
  // Rows beyond mr alias the previous row so their loads/stores stay in
  // bounds; the redundant results are simply discarded by the caller.
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }

  // The kernel always consumes whole 8-byte slices of A; the packer pads
  // weights accordingly, so rounding kc up keeps the loops exact.
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  do {
    // Initialize all 4 row accumulators from the per-column int32 biases.
    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
    int32x4_t vacc1x0123 = vacc0x0123;
    int32x4_t vacc1x4567 = vacc0x4567;
    int32x4_t vacc2x0123 = vacc0x0123;
    int32x4_t vacc2x4567 = vacc0x4567;
    int32x4_t vacc3x0123 = vacc0x0123;
    int32x4_t vacc3x4567 = vacc0x4567;

    size_t k = kc;
    // Main loop: 16 bytes of A per row per iteration, two 8-byte halves
    // (x0/x1) combined via VMULL+VMLAL before each widening accumulate,
    // halving the number of VPADAL instructions.
    while (k >= 16 * sizeof(int8_t)) {
      int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
      int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
      int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
      int8x8_t va1x1 = vld1_s8(a1); a1 += 8;
      int8x8_t va2x0 = vld1_s8(a2); a2 += 8;
      int8x8_t va2x1 = vld1_s8(a2); a2 += 8;
      int8x8_t va3x0 = vld1_s8(a3); a3 += 8;
      int8x8_t va3x1 = vld1_s8(a3); a3 += 8;
      // Weights for all four 2-element channel groups (c0..c3) of the first
      // A half; the second-half weights are loaded just before each VMLAL.
      const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      // --- channel group c0 ---
      int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0x0);
      int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1x0);
      int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2x0);
      int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0x0, va3x0);
      const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
      vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0x1);
      vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1x1);
      vprod2x0123c0 = vmlal_s8(vprod2x0123c0, vb0123c0x1, va2x1);
      vprod3x0123c0 = vmlal_s8(vprod3x0123c0, vb0123c0x1, va3x1);
      // Pairwise-add adjacent int16 products into the int32 accumulators:
      // this is what reduces each 2-element channel group.
      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
      int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0x0);
      int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1x0);
      int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0x0, va2x0);
      int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0x0, va3x0);
      const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
      vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0x1);
      vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1x1);
      vprod2x4567c0 = vmlal_s8(vprod2x4567c0, vb4567c0x1, va2x1);
      vprod3x4567c0 = vmlal_s8(vprod3x4567c0, vb4567c0x1, va3x1);
      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
      // Rotate each A vector by 2 bytes to line up the next channel pair
      // with the next pre-shuffled weight group (the "s4" shuffle).
      va0x0 = vext_s8(va0x0, va0x0, 2);
      va0x1 = vext_s8(va0x1, va0x1, 2);
      va1x0 = vext_s8(va1x0, va1x0, 2);
      va1x1 = vext_s8(va1x1, va1x1, 2);
      va2x0 = vext_s8(va2x0, va2x0, 2);
      va2x1 = vext_s8(va2x1, va2x1, 2);
      va3x0 = vext_s8(va3x0, va3x0, 2);
      va3x1 = vext_s8(va3x1, va3x1, 2);
      // --- channel group c1 ---
      int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0x0);
      int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1x0);
      int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2x0);
      int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1x0, va3x0);
      const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
      vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0x1);
      vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1x1);
      vprod2x0123c1 = vmlal_s8(vprod2x0123c1, vb0123c1x1, va2x1);
      vprod3x0123c1 = vmlal_s8(vprod3x0123c1, vb0123c1x1, va3x1);
      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
      int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0x0);
      int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1x0);
      int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1x0, va2x0);
      int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1x0, va3x0);
      const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
      vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0x1);
      vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1x1);
      vprod2x4567c1 = vmlal_s8(vprod2x4567c1, vb4567c1x1, va2x1);
      vprod3x4567c1 = vmlal_s8(vprod3x4567c1, vb4567c1x1, va3x1);
      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
      va0x0 = vext_s8(va0x0, va0x0, 2);
      va0x1 = vext_s8(va0x1, va0x1, 2);
      va1x0 = vext_s8(va1x0, va1x0, 2);
      va1x1 = vext_s8(va1x1, va1x1, 2);
      va2x0 = vext_s8(va2x0, va2x0, 2);
      va2x1 = vext_s8(va2x1, va2x1, 2);
      va3x0 = vext_s8(va3x0, va3x0, 2);
      va3x1 = vext_s8(va3x1, va3x1, 2);
      // --- channel group c2 ---
      int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0x0);
      int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1x0);
      int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2x0);
      int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2x0, va3x0);
      const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const int8_t*) w + 8;
      vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0x1);
      vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1x1);
      vprod2x0123c2 = vmlal_s8(vprod2x0123c2, vb0123c2x1, va2x1);
      vprod3x0123c2 = vmlal_s8(vprod3x0123c2, vb0123c2x1, va3x1);
      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
      int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0x0);
      int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1x0);
      int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2x0, va2x0);
      int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2x0, va3x0);
      const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const int8_t*) w + 8;
      vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0x1);
      vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1x1);
      vprod2x4567c2 = vmlal_s8(vprod2x4567c2, vb4567c2x1, va2x1);
      vprod3x4567c2 = vmlal_s8(vprod3x4567c2, vb4567c2x1, va3x1);
      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
      va0x0 = vext_s8(va0x0, va0x0, 2);
      va0x1 = vext_s8(va0x1, va0x1, 2);
      va1x0 = vext_s8(va1x0, va1x0, 2);
      va1x1 = vext_s8(va1x1, va1x1, 2);
      va2x0 = vext_s8(va2x0, va2x0, 2);
      va2x1 = vext_s8(va2x1, va2x1, 2);
      va3x0 = vext_s8(va3x0, va3x0, 2);
      va3x1 = vext_s8(va3x1, va3x1, 2);
      // --- channel group c3 ---
      int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0x0);
      int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1x0);
      int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2x0);
      int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3x0, va3x0);
      const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const int8_t*) w + 8;
      vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0x1);
      vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1x1);
      vprod2x0123c3 = vmlal_s8(vprod2x0123c3, vb0123c3x1, va2x1);
      vprod3x0123c3 = vmlal_s8(vprod3x0123c3, vb0123c3x1, va3x1);
      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3);
      int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0x0);
      int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1x0);
      int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3x0, va2x0);
      int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3x0, va3x0);
      const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const int8_t*) w + 8;
      vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0x1);
      vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1x1);
      vprod2x4567c3 = vmlal_s8(vprod2x4567c3, vb4567c3x1, va2x1);
      vprod3x4567c3 = vmlal_s8(vprod3x4567c3, vb4567c3x1, va3x1);
      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3);
      k -= 16 * sizeof(int8_t);
    }
    // Remainder: one final 8-byte slice per row (kc was rounded up to a
    // multiple of 8, so after the main loop k is either 0 or 8). Same
    // shuffle pattern as above, VMULL only (no second half to VMLAL).
    if (k != 0) {
      int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
      int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
      int8x8_t va2x0 = vld1_s8(a2); a2 += 8;
      int8x8_t va3x0 = vld1_s8(a3); a3 += 8;
      const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0x0);
      int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1x0);
      int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2x0);
      int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0x0, va3x0);
      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
      int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0x0);
      int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1x0);
      int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0x0, va2x0);
      int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0x0, va3x0);
      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
      va0x0 = vext_s8(va0x0, va0x0, 2);
      va1x0 = vext_s8(va1x0, va1x0, 2);
      va2x0 = vext_s8(va2x0, va2x0, 2);
      va3x0 = vext_s8(va3x0, va3x0, 2);
      int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0x0);
      int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1x0);
      int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2x0);
      int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1x0, va3x0);
      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
      int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0x0);
      int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1x0);
      int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1x0, va2x0);
      int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1x0, va3x0);
      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
      va0x0 = vext_s8(va0x0, va0x0, 2);
      va1x0 = vext_s8(va1x0, va1x0, 2);
      va2x0 = vext_s8(va2x0, va2x0, 2);
      va3x0 = vext_s8(va3x0, va3x0, 2);
      int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0x0);
      int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1x0);
      int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2x0);
      int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2x0, va3x0);
      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
      int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0x0);
      int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1x0);
      int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2x0, va2x0);
      int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2x0, va3x0);
      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
      va0x0 = vext_s8(va0x0, va0x0, 2);
      va1x0 = vext_s8(va1x0, va1x0, 2);
      va2x0 = vext_s8(va2x0, va2x0, 2);
      va3x0 = vext_s8(va3x0, va3x0, 2);
      int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0x0);
      int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1x0);
      int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2x0);
      int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3x0, va3x0);
      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3);
      int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0x0);
      int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1x0);
      int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3x0, va2x0);
      int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3x0, va3x0);
      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3);
    }

    // Requantization (rndnu): saturating pre-shift, doubling-high multiply,
    // then rounding post-shift, applied to every int32 accumulator.
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
    vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);
    vacc3x0123 = vqshlq_s32(vacc3x0123, vright_pre_shift);
    vacc3x4567 = vqshlq_s32(vacc3x4567, vright_pre_shift);
    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);

    // Narrow int32 -> int16 with saturation, add the output zero point,
    // then narrow int16 -> int8 with saturation. AArch64 uses the *_high
    // forms to avoid the extra vcombine.
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
    int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
    int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567);
    int16x8_t vacc3x01234567 = vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567);

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
    vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
    int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
#else
    int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
    int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
    int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567));
    int16x8_t vacc3x01234567 = vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567));

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
    vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
    int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
#endif

    // Clamp to the caller-specified output range; rows 0/1 share one q-register,
    // rows 2/3 the other.
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
    vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);
    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
    vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);

    if (nc >= 8) {
      // Full 8-column store for each row, then rewind A pointers (they
      // advanced by kc) and advance C pointers for the next column group.
      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
      vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
      vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);

      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
      a3 = (const int8_t*) ((uintptr_t) a3 - kc);

      nc -= 8;
    } else {
      // Final case where not all of the 8 columns fit in the destination.
      // Store 4/2/1 lanes per the low bits of nc, rotating the vectors
      // after each partial store so lane 0 always holds the next byte.
      if (nc & 4) {
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
        vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
      }
      if (nc & 2) {
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
        vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
      }
      if (nc & 1) {
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
      }

      nc = 0;
    }
  } while (nc != 0);
}
| 21,940
| 50.025581
| 112
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x8c2s4-minmax-rndnu-neon-mull.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c2-neon-mull-shuffle.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2s4__neon_mull(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const int8_t* a0 = a;
int8_t* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
kc = round_up_po2(kc, 8 * sizeof(int8_t));
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc1x0123 = vacc0x0123;
int32x4_t vacc1x4567 = vacc0x4567;
int32x4_t vacc2x0123 = vacc0x0123;
int32x4_t vacc2x4567 = vacc0x4567;
int32x4_t vacc3x0123 = vacc0x0123;
int32x4_t vacc3x4567 = vacc0x4567;
size_t k = kc;
do {
int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
int8x8_t va2x0 = vld1_s8(a2); a2 += 8;
int8x8_t va3x0 = vld1_s8(a3); a3 += 8;
const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0x0);
int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1x0);
int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2x0);
int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0x0, va3x0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0x0);
int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1x0);
int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0x0, va2x0);
int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0x0, va3x0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
va0x0 = vext_s8(va0x0, va0x0, 2);
va1x0 = vext_s8(va1x0, va1x0, 2);
va2x0 = vext_s8(va2x0, va2x0, 2);
va3x0 = vext_s8(va3x0, va3x0, 2);
int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0x0);
int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1x0);
int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2x0);
int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1x0, va3x0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0x0);
int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1x0);
int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1x0, va2x0);
int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1x0, va3x0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
va0x0 = vext_s8(va0x0, va0x0, 2);
va1x0 = vext_s8(va1x0, va1x0, 2);
va2x0 = vext_s8(va2x0, va2x0, 2);
va3x0 = vext_s8(va3x0, va3x0, 2);
int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0x0);
int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1x0);
int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2x0);
int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2x0, va3x0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0x0);
int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1x0);
int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2x0, va2x0);
int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2x0, va3x0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
va0x0 = vext_s8(va0x0, va0x0, 2);
va1x0 = vext_s8(va1x0, va1x0, 2);
va2x0 = vext_s8(va2x0, va2x0, 2);
va3x0 = vext_s8(va3x0, va3x0, 2);
int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0x0);
int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1x0);
int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2x0);
int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3x0, va3x0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3);
int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0x0);
int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1x0);
int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3x0, va2x0);
int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3x0, va3x0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3);
k -= 8 * sizeof(int8_t);
} while (k != 0);
const int32x4_t vright_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);
vacc3x0123 = vqshlq_s32(vacc3x0123, vright_pre_shift);
vacc3x4567 = vqshlq_s32(vacc3x4567, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567);
int16x8_t vacc3x01234567 = vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);
int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567));
int16x8_t vacc3x01234567 = vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);
int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
#endif
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->rndnu_neon.output_min);
vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->rndnu_neon.output_max);
vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
a2 = (const int8_t*) ((uintptr_t) a2 - kc);
a3 = (const int8_t*) ((uintptr_t) a3 - kc);
nc -= 8;
} else {
// Final case where not all of the 8 columns fit in the destination.
if (nc & 4) {
vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
}
if (nc & 2) {
vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
}
if (nc & 1) {
vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
}
nc = 0;
}
} while (nc != 0);
}
| 13,458
| 46.896797
| 112
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x8c4-minmax-rndnu-neon-mull-dup.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c4-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
// QS8 GEMM microkernel: computes a 4x8 tile of C = A*B for signed 8-bit
// quantized matrices using NEON VMULL.S8, with "rndnu" (round-to-nearest-up)
// requantization and int8 min/max clamping.
//   mr/nc/kc  - tile rows, output columns, reduction depth (bytes)
//   a/a_stride - row-major LHS and its row stride (bytes)
//   w         - packed weights: 8 int32 biases followed by int8 B panels
//   c/cm_stride/cn_stride - output pointer and row/column-group strides
// "c4" packing: B is consumed 4 input channels at a time; each int32x4
// accumulator holds partial sums for 2 output columns (one per 64-bit lane)
// and is reduced to per-column sums by pairwise adds after the K loop.
// XNN_OOB_READS: the remainder path may read up to 8 bytes from A even when
// fewer than 8 remain, so callers must guarantee those bytes are readable.
void xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4__neon_mull_dup(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
// Weights are packed with K rounded up to a multiple of 4 (the "c4" layout),
// so the reduction loop can always step in aligned 4-channel groups.
kc = round_up_po2(kc, 4 * sizeof(int8_t));
// Set up per-row input/output pointers. For mr < 4, lower rows alias the
// previous row so the kernel computes (harmlessly duplicated) valid data
// without branching inside the loops.
const int8_t* a0 = a;
int8_t* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
// Load 8 int32 biases, two at a time. vmovl_u32 zero-extends each 32-bit
// bias into a 64-bit lane, so e.g. vacc0x01 holds {bias0, 0, bias1, 0}
// viewed as int32x4 — the zero halves absorb the vpadalq accumulation and
// are folded back by the pairwise adds after the K loop.
int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
// All 4 rows start from the same (bias-initialized) accumulators.
int32x4_t vacc1x01 = vacc0x01;
int32x4_t vacc1x23 = vacc0x23;
int32x4_t vacc1x45 = vacc0x45;
int32x4_t vacc1x67 = vacc0x67;
int32x4_t vacc2x01 = vacc0x01;
int32x4_t vacc2x23 = vacc0x23;
int32x4_t vacc2x45 = vacc0x45;
int32x4_t vacc2x67 = vacc0x67;
int32x4_t vacc3x01 = vacc0x01;
int32x4_t vacc3x23 = vacc0x23;
int32x4_t vacc3x45 = vacc0x45;
int32x4_t vacc3x67 = vacc0x67;
size_t k = kc;
// Main K loop: process 8 input channels (two 4-channel groups, c0 and c1)
// per iteration. The "dup" variant broadcasts each 4-byte group of A with
// vdup_lane_s32 so one vmull_s8 multiplies it against 4 B bytes per column
// pair; vpadalq_s16 pairwise-accumulates the 16-bit products into int32.
while (k >= 8 * sizeof(int8_t)) {
const int8x8_t va0 = vld1_s8(a0); a0 += 8;
const int8x8_t va1 = vld1_s8(a1); a1 += 8;
const int8x8_t va2 = vld1_s8(a2); a2 += 8;
const int8x8_t va3 = vld1_s8(a3); a3 += 8;
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
// Channel group c0: broadcast the first 4 bytes of each A row.
const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
const int8x8_t va1c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 0));
const int8x8_t va2c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 0));
const int8x8_t va3c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va3), 0));
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0);
const int16x8_t vprod2x01c0 = vmull_s8(vb01c0, va2c0);
const int16x8_t vprod3x01c0 = vmull_s8(vb01c0, va3c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0);
const int16x8_t vprod2x23c0 = vmull_s8(vb23c0, va2c0);
const int16x8_t vprod3x23c0 = vmull_s8(vb23c0, va3c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0);
const int16x8_t vprod2x45c0 = vmull_s8(vb45c0, va2c0);
const int16x8_t vprod3x45c0 = vmull_s8(vb45c0, va3c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0);
const int16x8_t vprod2x67c0 = vmull_s8(vb67c0, va2c0);
const int16x8_t vprod3x67c0 = vmull_s8(vb67c0, va3c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c0);
// Channel group c1: broadcast the second 4 bytes of each A row.
const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
const int8x8_t va1c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 1));
const int8x8_t va2c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 1));
const int8x8_t va3c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va3), 1));
const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
const int16x8_t vprod1x01c1 = vmull_s8(vb01c1, va1c1);
const int16x8_t vprod2x01c1 = vmull_s8(vb01c1, va2c1);
const int16x8_t vprod3x01c1 = vmull_s8(vb01c1, va3c1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c1);
const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
const int16x8_t vprod1x23c1 = vmull_s8(vb23c1, va1c1);
const int16x8_t vprod2x23c1 = vmull_s8(vb23c1, va2c1);
const int16x8_t vprod3x23c1 = vmull_s8(vb23c1, va3c1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c1);
const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
const int16x8_t vprod1x45c1 = vmull_s8(vb45c1, va1c1);
const int16x8_t vprod2x45c1 = vmull_s8(vb45c1, va2c1);
const int16x8_t vprod3x45c1 = vmull_s8(vb45c1, va3c1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c1);
const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
const int16x8_t vprod1x67c1 = vmull_s8(vb67c1, va1c1);
const int16x8_t vprod2x67c1 = vmull_s8(vb67c1, va2c1);
const int16x8_t vprod3x67c1 = vmull_s8(vb67c1, va3c1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c1);
k -= 8 * sizeof(int8_t);
}
// Remainder: at most one 4-channel group left (kc is a multiple of 4).
// A pointers advance by k, but vld1_s8 still reads 8 bytes (XNN_OOB_READS);
// packed weights are zero-padded so the extra lanes contribute nothing.
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k);
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int8x8_t va1c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 0));
const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0);
vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0);
vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0);
vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0);
vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
const int8x8_t va2c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 0));
const int16x8_t vprod2x01c0 = vmull_s8(vb01c0, va2c0);
vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
const int16x8_t vprod2x23c0 = vmull_s8(vb23c0, va2c0);
vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
const int16x8_t vprod2x45c0 = vmull_s8(vb45c0, va2c0);
vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
const int16x8_t vprod2x67c0 = vmull_s8(vb67c0, va2c0);
vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
const int8x8_t va3c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va3), 0));
const int16x8_t vprod3x01c0 = vmull_s8(vb01c0, va3c0);
vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c0);
const int16x8_t vprod3x23c0 = vmull_s8(vb23c0, va3c0);
vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c0);
const int16x8_t vprod3x45c0 = vmull_s8(vb45c0, va3c0);
vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c0);
const int16x8_t vprod3x67c0 = vmull_s8(vb67c0, va3c0);
vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c0);
}
// Reduce the 2-columns-per-vector accumulators into one int32 per output
// column: on AArch64 vpaddq does it directly; on AArch32 use 64-bit
// pairwise adds plus vcombine.
#if XNN_ARCH_ARM64
int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23);
int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67);
int32x4_t vacc2x0123 = vpaddq_s32(vacc2x01, vacc2x23);
int32x4_t vacc2x4567 = vpaddq_s32(vacc2x45, vacc2x67);
int32x4_t vacc3x0123 = vpaddq_s32(vacc3x01, vacc3x23);
int32x4_t vacc3x4567 = vpaddq_s32(vacc3x45, vacc3x67);
#else
const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01));
const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23));
int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45));
const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67));
int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
const int32x2_t vsum2x01 = vpadd_s32(vget_low_s32(vacc2x01), vget_high_s32(vacc2x01));
const int32x2_t vsum2x23 = vpadd_s32(vget_low_s32(vacc2x23), vget_high_s32(vacc2x23));
int32x4_t vacc2x0123 = vcombine_s32(vsum2x01, vsum2x23);
const int32x2_t vsum2x45 = vpadd_s32(vget_low_s32(vacc2x45), vget_high_s32(vacc2x45));
const int32x2_t vsum2x67 = vpadd_s32(vget_low_s32(vacc2x67), vget_high_s32(vacc2x67));
int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);
const int32x2_t vsum3x01 = vpadd_s32(vget_low_s32(vacc3x01), vget_high_s32(vacc3x01));
const int32x2_t vsum3x23 = vpadd_s32(vget_low_s32(vacc3x23), vget_high_s32(vacc3x23));
int32x4_t vacc3x0123 = vcombine_s32(vsum3x01, vsum3x23);
const int32x2_t vsum3x45 = vpadd_s32(vget_low_s32(vacc3x45), vget_high_s32(vacc3x45));
const int32x2_t vsum3x67 = vpadd_s32(vget_low_s32(vacc3x67), vget_high_s32(vacc3x67));
int32x4_t vacc3x4567 = vcombine_s32(vsum3x45, vsum3x67);
#endif
// rndnu requantization: pre-shift (vqshlq shifts left by the lane value, so
// a non-positive right_pre_shift acts as a saturating right shift — per the
// params layout), saturating-doubling multiply-high, then rounding
// post-shift via vrshlq with right_post_shift.
const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);
vacc3x0123 = vqshlq_s32(vacc3x0123, vright_pre_shift);
vacc3x4567 = vqshlq_s32(vacc3x4567, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
// Narrow int32 -> int16 (saturating), add the output zero point, then
// narrow int16 -> int8 (saturating). AArch64 uses the *_high forms.
const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567);
int16x8_t vacc3x01234567 = vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);
int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567));
int16x8_t vacc3x01234567 = vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);
int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
#endif
// Clamp to the caller-provided output range.
const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);
const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);
if (nc >= 8) {
// Full 8-column store; rewind A pointers by kc to reuse the same rows for
// the next column group.
vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
a2 = (const int8_t*) ((uintptr_t) a2 - kc);
a3 = (const int8_t*) ((uintptr_t) a3 - kc);
nc -= 8;
} else {
// Final case where not all of the 8 columns fit in the destination.
// Store 4/2/1 lanes at a time, rotating the vector with vextq_s8 so the
// next chunk is always in lane 0 (row 1/3 data sits in lane 2/4/8).
if (nc & 4) {
vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
}
if (nc & 2) {
vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
}
if (nc & 1) {
vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
}
nc = 0;
}
} while (nc != 0);
}
| 19,693
| 51.657754
| 128
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x8c4-minmax-rndnu-neon-mull-ld1r.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c4-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4__neon_mull_ld1r(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 4 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc1x01 = vacc0x01;
int32x4_t vacc1x23 = vacc0x23;
int32x4_t vacc1x45 = vacc0x45;
int32x4_t vacc1x67 = vacc0x67;
int32x4_t vacc2x01 = vacc0x01;
int32x4_t vacc2x23 = vacc0x23;
int32x4_t vacc2x45 = vacc0x45;
int32x4_t vacc2x67 = vacc0x67;
int32x4_t vacc3x01 = vacc0x01;
int32x4_t vacc3x23 = vacc0x23;
int32x4_t vacc3x45 = vacc0x45;
int32x4_t vacc3x67 = vacc0x67;
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const int32x2_t va00 = vld1_dup_s32((const void*)a0);
const int32x2_t va01 = vld1_dup_s32((const void*)(a0 + 4)); a0 += 8;
const int32x2_t va10 = vld1_dup_s32((const void*)a1);
const int32x2_t va11 = vld1_dup_s32((const void*)(a1 + 4)); a1 += 8;
const int32x2_t va20 = vld1_dup_s32((const void*)a2);
const int32x2_t va21 = vld1_dup_s32((const void*)(a2 + 4)); a2 += 8;
const int32x2_t va30 = vld1_dup_s32((const void*)a3);
const int32x2_t va31 = vld1_dup_s32((const void*)(a3 + 4)); a3 += 8;
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(va00);
const int8x8_t va1c0 = vreinterpret_s8_s32(va10);
const int8x8_t va2c0 = vreinterpret_s8_s32(va20);
const int8x8_t va3c0 = vreinterpret_s8_s32(va30);
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0);
const int16x8_t vprod2x01c0 = vmull_s8(vb01c0, va2c0);
const int16x8_t vprod3x01c0 = vmull_s8(vb01c0, va3c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0);
const int16x8_t vprod2x23c0 = vmull_s8(vb23c0, va2c0);
const int16x8_t vprod3x23c0 = vmull_s8(vb23c0, va3c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0);
const int16x8_t vprod2x45c0 = vmull_s8(vb45c0, va2c0);
const int16x8_t vprod3x45c0 = vmull_s8(vb45c0, va3c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0);
const int16x8_t vprod2x67c0 = vmull_s8(vb67c0, va2c0);
const int16x8_t vprod3x67c0 = vmull_s8(vb67c0, va3c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c0);
const int8x8_t va0c1 = vreinterpret_s8_s32(va01);
const int8x8_t va1c1 = vreinterpret_s8_s32(va11);
const int8x8_t va2c1 = vreinterpret_s8_s32(va21);
const int8x8_t va3c1 = vreinterpret_s8_s32(va31);
const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
const int16x8_t vprod1x01c1 = vmull_s8(vb01c1, va1c1);
const int16x8_t vprod2x01c1 = vmull_s8(vb01c1, va2c1);
const int16x8_t vprod3x01c1 = vmull_s8(vb01c1, va3c1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c1);
const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
const int16x8_t vprod1x23c1 = vmull_s8(vb23c1, va1c1);
const int16x8_t vprod2x23c1 = vmull_s8(vb23c1, va2c1);
const int16x8_t vprod3x23c1 = vmull_s8(vb23c1, va3c1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c1);
const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
const int16x8_t vprod1x45c1 = vmull_s8(vb45c1, va1c1);
const int16x8_t vprod2x45c1 = vmull_s8(vb45c1, va2c1);
const int16x8_t vprod3x45c1 = vmull_s8(vb45c1, va3c1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c1);
const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
const int16x8_t vprod1x67c1 = vmull_s8(vb67c1, va1c1);
const int16x8_t vprod2x67c1 = vmull_s8(vb67c1, va2c1);
const int16x8_t vprod3x67c1 = vmull_s8(vb67c1, va3c1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c1);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k);
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int8x8_t va1c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 0));
const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0);
vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0);
vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0);
vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0);
vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
const int8x8_t va2c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 0));
const int16x8_t vprod2x01c0 = vmull_s8(vb01c0, va2c0);
vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
const int16x8_t vprod2x23c0 = vmull_s8(vb23c0, va2c0);
vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
const int16x8_t vprod2x45c0 = vmull_s8(vb45c0, va2c0);
vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
const int16x8_t vprod2x67c0 = vmull_s8(vb67c0, va2c0);
vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
const int8x8_t va3c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va3), 0));
const int16x8_t vprod3x01c0 = vmull_s8(vb01c0, va3c0);
vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c0);
const int16x8_t vprod3x23c0 = vmull_s8(vb23c0, va3c0);
vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c0);
const int16x8_t vprod3x45c0 = vmull_s8(vb45c0, va3c0);
vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c0);
const int16x8_t vprod3x67c0 = vmull_s8(vb67c0, va3c0);
vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c0);
}
#if XNN_ARCH_ARM64
int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23);
int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67);
int32x4_t vacc2x0123 = vpaddq_s32(vacc2x01, vacc2x23);
int32x4_t vacc2x4567 = vpaddq_s32(vacc2x45, vacc2x67);
int32x4_t vacc3x0123 = vpaddq_s32(vacc3x01, vacc3x23);
int32x4_t vacc3x4567 = vpaddq_s32(vacc3x45, vacc3x67);
#else
const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01));
const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23));
int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45));
const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67));
int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
const int32x2_t vsum2x01 = vpadd_s32(vget_low_s32(vacc2x01), vget_high_s32(vacc2x01));
const int32x2_t vsum2x23 = vpadd_s32(vget_low_s32(vacc2x23), vget_high_s32(vacc2x23));
int32x4_t vacc2x0123 = vcombine_s32(vsum2x01, vsum2x23);
const int32x2_t vsum2x45 = vpadd_s32(vget_low_s32(vacc2x45), vget_high_s32(vacc2x45));
const int32x2_t vsum2x67 = vpadd_s32(vget_low_s32(vacc2x67), vget_high_s32(vacc2x67));
int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);
const int32x2_t vsum3x01 = vpadd_s32(vget_low_s32(vacc3x01), vget_high_s32(vacc3x01));
const int32x2_t vsum3x23 = vpadd_s32(vget_low_s32(vacc3x23), vget_high_s32(vacc3x23));
int32x4_t vacc3x0123 = vcombine_s32(vsum3x01, vsum3x23);
const int32x2_t vsum3x45 = vpadd_s32(vget_low_s32(vacc3x45), vget_high_s32(vacc3x45));
const int32x2_t vsum3x67 = vpadd_s32(vget_low_s32(vacc3x67), vget_high_s32(vacc3x67));
int32x4_t vacc3x4567 = vcombine_s32(vsum3x45, vsum3x67);
#endif
const int32x4_t vright_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);
vacc3x0123 = vqshlq_s32(vacc3x0123, vright_pre_shift);
vacc3x4567 = vqshlq_s32(vacc3x4567, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567);
int16x8_t vacc3x01234567 = vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);
int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567));
int16x8_t vacc3x01234567 = vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);
int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
#endif
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->rndnu_neon.output_min);
vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->rndnu_neon.output_max);
vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
a2 = (const int8_t*) ((uintptr_t) a2 - kc);
a3 = (const int8_t*) ((uintptr_t) a3 - kc);
nc -= 8;
} else {
// Final case where not all of the 8 columns fit in the destination.
if (nc & 4) {
vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
}
if (nc & 2) {
vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
}
if (nc & 1) {
vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
}
nc = 0;
}
} while (nc != 0);
}
| 19,734
| 51.208995
| 128
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x8c4-minmax-rndnu-neon-mull-ld2r.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c4-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
// QS8 GEMM microkernel computing a 4x8 output tile with rndnu requantization.
// "c4": each multiply step consumes 4 int8 activations per row; "ld2r" names
// the vld2_dup activation load used in the main loop. Products are formed with
// vmull_s8 (int8 -> int16 widening multiply) and folded into int32 lanes via
// vpadalq_s16; each int32x4 accumulator covers two output columns (two 16-bit
// pair-sums per column), which are pairwise-reduced to one int32 per column
// after the K loop.
void xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4__neon_mull_ld2r(
    size_t mr,                 // rows of A/C to process, 1 <= mr <= 4
    size_t nc,                 // output columns remaining
    size_t kc,                 // reduction (K) length in bytes of int8
    const int8_t* restrict a,  // activations, mr rows of kc bytes
    size_t a_stride,           // byte stride between rows of A
    const void* restrict w,    // packed weights: per 8-column group, 8 int32 biases then int8 weights
    int8_t* restrict c,        // output
    size_t cm_stride,          // byte stride between rows of C
    size_t cn_stride,          // byte stride advanced after each 8-column group
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // K is processed in groups of 4 int8 values; packing guarantees the
  // weight stream is padded accordingly.
  kc = round_up_po2(kc, 4 * sizeof(int8_t));
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  // For mr < 4, alias the out-of-range row pointers to the previous row so
  // the extra computation is harmless and no out-of-bounds row is touched.
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }
  do {
    // Load 8 int32 biases (2 at a time). vmovl_u32 zero-extends each bias
    // into a 64-bit lane, i.e. bias lands in the even int32 lane with a zero
    // odd lane — the final pairwise reduction (vpadd/vpaddq below) then sums
    // bias + partial dot products into a single int32 per column.
    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
    // Rows 1..3 start from the same bias values.
    int32x4_t vacc1x01 = vacc0x01;
    int32x4_t vacc1x23 = vacc0x23;
    int32x4_t vacc1x45 = vacc0x45;
    int32x4_t vacc1x67 = vacc0x67;
    int32x4_t vacc2x01 = vacc0x01;
    int32x4_t vacc2x23 = vacc0x23;
    int32x4_t vacc2x45 = vacc0x45;
    int32x4_t vacc2x67 = vacc0x67;
    int32x4_t vacc3x01 = vacc0x01;
    int32x4_t vacc3x23 = vacc0x23;
    int32x4_t vacc3x45 = vacc0x45;
    int32x4_t vacc3x67 = vacc0x67;

    size_t k = kc;
    // Main loop: 8 bytes of K per iteration (two c0/c1 groups of 4).
    // vld2_dup_s32 loads two 32-bit activation groups per row, each
    // broadcast across a 64-bit vector (val[0] = bytes 0-3, val[1] = bytes 4-7).
    while (k >= 8 * sizeof(int8_t)) {
      const int32x2x2_t va0 = vld2_dup_s32((const void*)a0); a0 += 8;
      const int32x2x2_t va1 = vld2_dup_s32((const void*)a1); a1 += 8;
      const int32x2x2_t va2 = vld2_dup_s32((const void*)a2); a2 += 8;
      const int32x2x2_t va3 = vld2_dup_s32((const void*)a3); a3 += 8;

      // 8 weight vectors: columns {01,23,45,67} for K-group c0, then c1.
      const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

      // K-group c0: multiply-accumulate for all 4 rows x 8 columns.
      const int8x8_t va0c0 = vreinterpret_s8_s32(va0.val[0]);
      const int8x8_t va1c0 = vreinterpret_s8_s32(va1.val[0]);
      const int8x8_t va2c0 = vreinterpret_s8_s32(va2.val[0]);
      const int8x8_t va3c0 = vreinterpret_s8_s32(va3.val[0]);

      const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
      const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0);
      const int16x8_t vprod2x01c0 = vmull_s8(vb01c0, va2c0);
      const int16x8_t vprod3x01c0 = vmull_s8(vb01c0, va3c0);
      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
      vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
      vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c0);
      const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
      const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0);
      const int16x8_t vprod2x23c0 = vmull_s8(vb23c0, va2c0);
      const int16x8_t vprod3x23c0 = vmull_s8(vb23c0, va3c0);
      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
      vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
      vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c0);
      const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
      const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0);
      const int16x8_t vprod2x45c0 = vmull_s8(vb45c0, va2c0);
      const int16x8_t vprod3x45c0 = vmull_s8(vb45c0, va3c0);
      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
      vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
      vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c0);
      const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
      const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0);
      const int16x8_t vprod2x67c0 = vmull_s8(vb67c0, va2c0);
      const int16x8_t vprod3x67c0 = vmull_s8(vb67c0, va3c0);
      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
      vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
      vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c0);

      // K-group c1: same pattern with the second activation group.
      const int8x8_t va0c1 = vreinterpret_s8_s32(va0.val[1]);
      const int8x8_t va1c1 = vreinterpret_s8_s32(va1.val[1]);
      const int8x8_t va2c1 = vreinterpret_s8_s32(va2.val[1]);
      const int8x8_t va3c1 = vreinterpret_s8_s32(va3.val[1]);

      const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
      const int16x8_t vprod1x01c1 = vmull_s8(vb01c1, va1c1);
      const int16x8_t vprod2x01c1 = vmull_s8(vb01c1, va2c1);
      const int16x8_t vprod3x01c1 = vmull_s8(vb01c1, va3c1);
      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
      vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
      vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c1);
      const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
      const int16x8_t vprod1x23c1 = vmull_s8(vb23c1, va1c1);
      const int16x8_t vprod2x23c1 = vmull_s8(vb23c1, va2c1);
      const int16x8_t vprod3x23c1 = vmull_s8(vb23c1, va3c1);
      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
      vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
      vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c1);
      const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
      const int16x8_t vprod1x45c1 = vmull_s8(vb45c1, va1c1);
      const int16x8_t vprod2x45c1 = vmull_s8(vb45c1, va2c1);
      const int16x8_t vprod3x45c1 = vmull_s8(vb45c1, va3c1);
      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
      vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
      vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c1);
      const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
      const int16x8_t vprod1x67c1 = vmull_s8(vb67c1, va1c1);
      const int16x8_t vprod2x67c1 = vmull_s8(vb67c1, va2c1);
      const int16x8_t vprod3x67c1 = vmull_s8(vb67c1, va3c1);
      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
      vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
      vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c1);

      k -= 8 * sizeof(int8_t);
    }
    // Remainder: since kc is rounded up to a multiple of 4 and the main loop
    // consumes 8 at a time, at most one 4-byte group remains here. vld1_s8
    // reads 8 bytes (over-read allowed per XNN_OOB_READS); only the first
    // 4-byte group (lane 0) is used.
    if XNN_UNLIKELY(k != 0) {
      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
      const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
      const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k);

      const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

      // Broadcast the first 4-byte activation group of each row and
      // accumulate against the c0 weight vectors.
      const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
      const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
      const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
      const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
      const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
      const int8x8_t va1c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 0));
      const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0);
      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
      const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0);
      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
      const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0);
      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
      const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0);
      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
      const int8x8_t va2c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 0));
      const int16x8_t vprod2x01c0 = vmull_s8(vb01c0, va2c0);
      vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
      const int16x8_t vprod2x23c0 = vmull_s8(vb23c0, va2c0);
      vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
      const int16x8_t vprod2x45c0 = vmull_s8(vb45c0, va2c0);
      vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
      const int16x8_t vprod2x67c0 = vmull_s8(vb67c0, va2c0);
      vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
      const int8x8_t va3c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va3), 0));
      const int16x8_t vprod3x01c0 = vmull_s8(vb01c0, va3c0);
      vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c0);
      const int16x8_t vprod3x23c0 = vmull_s8(vb23c0, va3c0);
      vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c0);
      const int16x8_t vprod3x45c0 = vmull_s8(vb45c0, va3c0);
      vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c0);
      const int16x8_t vprod3x67c0 = vmull_s8(vb67c0, va3c0);
      vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c0);
    }

    // Pairwise-reduce the paired-column accumulators into one int32 per
    // output column (4 columns per vector). AArch64 has vpaddq_s32; on
    // AArch32 the same reduction is built from 64-bit vpadd_s32 + vcombine.
#if XNN_ARCH_ARM64
    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
    int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23);
    int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67);
    int32x4_t vacc2x0123 = vpaddq_s32(vacc2x01, vacc2x23);
    int32x4_t vacc2x4567 = vpaddq_s32(vacc2x45, vacc2x67);
    int32x4_t vacc3x0123 = vpaddq_s32(vacc3x01, vacc3x23);
    int32x4_t vacc3x4567 = vpaddq_s32(vacc3x45, vacc3x67);
#else
    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
    const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01));
    const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23));
    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
    const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45));
    const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67));
    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
    const int32x2_t vsum2x01 = vpadd_s32(vget_low_s32(vacc2x01), vget_high_s32(vacc2x01));
    const int32x2_t vsum2x23 = vpadd_s32(vget_low_s32(vacc2x23), vget_high_s32(vacc2x23));
    int32x4_t vacc2x0123 = vcombine_s32(vsum2x01, vsum2x23);
    const int32x2_t vsum2x45 = vpadd_s32(vget_low_s32(vacc2x45), vget_high_s32(vacc2x45));
    const int32x2_t vsum2x67 = vpadd_s32(vget_low_s32(vacc2x67), vget_high_s32(vacc2x67));
    int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);
    const int32x2_t vsum3x01 = vpadd_s32(vget_low_s32(vacc3x01), vget_high_s32(vacc3x01));
    const int32x2_t vsum3x23 = vpadd_s32(vget_low_s32(vacc3x23), vget_high_s32(vacc3x23));
    int32x4_t vacc3x0123 = vcombine_s32(vsum3x01, vsum3x23);
    const int32x2_t vsum3x45 = vpadd_s32(vget_low_s32(vacc3x45), vget_high_s32(vacc3x45));
    const int32x2_t vsum3x67 = vpadd_s32(vget_low_s32(vacc3x67), vget_high_s32(vacc3x67));
    int32x4_t vacc3x4567 = vcombine_s32(vsum3x45, vsum3x67);
#endif

    // rndnu requantization: saturating pre-shift, saturating doubling-high
    // multiply by the fixed-point multiplier, then rounding post-shift.
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
    vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);
    vacc3x0123 = vqshlq_s32(vacc3x0123, vright_pre_shift);
    vacc3x4567 = vqshlq_s32(vacc3x4567, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);

    // Narrow int32 -> int16 with saturation, add the output zero point,
    // then narrow int16 -> int8 with saturation. Rows are packed two per
    // 128-bit vector: {row0 | row1} and {row2 | row3}.
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
    int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
    int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567);
    int16x8_t vacc3x01234567 = vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567);

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
    vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
    int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
#else
    int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
    int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
    int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567));
    int16x8_t vacc3x01234567 = vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567));

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
    vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
    int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
#endif

    // Clamp to [output_min, output_max].
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
    vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);
    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
    vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);

    if (nc >= 8) {
      // Full 8-column store; rewind A pointers by kc for the next column group.
      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
      vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
      vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);

      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
      a3 = (const int8_t*) ((uintptr_t) a3 - kc);

      nc -= 8;
    } else {
      // Final case where not all of the 8 columns fit in the destination:
      // store 4, 2, then 1 byte(s) per row, shifting the vectors (vextq_s8)
      // between partial stores.
      if (nc & 4) {
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
        vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
      }
      if (nc & 2) {
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
        vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
      }
      if (nc & 1) {
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
      }

      nc = 0;
    }
  } while (nc != 0);
}
| 19,522
| 51.200535
| 128
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x8c4-minmax-rndnu-neondot.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c4-neondot.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4__neondot(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 4 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
// Loop over groups of 8 columns.
do {
// Initialize accumulators with bias. 8 bias values are loaded from the
// weight matrix, at the start of the group of 8 columns.
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc1x0123 = vacc0x0123;
int32x4_t vacc1x4567 = vacc0x4567;
int32x4_t vacc2x0123 = vacc0x0123;
int32x4_t vacc2x4567 = vacc0x4567;
int32x4_t vacc3x0123 = vacc0x0123;
int32x4_t vacc3x4567 = vacc0x4567;
// Inner accumulation loop along the 8 columns.
size_t k = kc;
// 2x partial unrolled loop to load 8 bytes at a time.
while (k >= 8 * sizeof(int8_t)) {
// Load a 4x8 block of activations.
const int8x8_t va0x01234567 = vld1_s8(a0); a0 += 8;
const int8x8_t va1x01234567 = vld1_s8(a1); a1 += 8;
const int8x8_t va2x01234567 = vld1_s8(a2); a2 += 8;
const int8x8_t va3x01234567 = vld1_s8(a3); a3 += 8;
// Load a 8x8 block of weights.
const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb4567x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb4567x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
// Multiply-accumulate: 4x8 * 8x8 --> 4x8.
vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0);
vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0);
vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb0123x0123, va1x01234567, 0);
vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb0123x4567, va1x01234567, 0);
vacc2x0123 = vdotq_lane_s32(vacc2x0123, vb0123x0123, va2x01234567, 0);
vacc2x4567 = vdotq_lane_s32(vacc2x4567, vb0123x4567, va2x01234567, 0);
vacc3x0123 = vdotq_lane_s32(vacc3x0123, vb0123x0123, va3x01234567, 0);
vacc3x4567 = vdotq_lane_s32(vacc3x4567, vb0123x4567, va3x01234567, 0);
vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb4567x0123, va0x01234567, 1);
vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb4567x4567, va0x01234567, 1);
vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb4567x0123, va1x01234567, 1);
vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb4567x4567, va1x01234567, 1);
vacc2x0123 = vdotq_lane_s32(vacc2x0123, vb4567x0123, va2x01234567, 1);
vacc2x4567 = vdotq_lane_s32(vacc2x4567, vb4567x4567, va2x01234567, 1);
vacc3x0123 = vdotq_lane_s32(vacc3x0123, vb4567x0123, va3x01234567, 1);
vacc3x4567 = vdotq_lane_s32(vacc3x4567, vb4567x4567, va3x01234567, 1);
k -= 8 * sizeof(int8_t);
}
// Handle up to 4 final positions of `k`
if XNN_UNLIKELY(k != 0) {
// Load a 4x4 block of activations.
const int8x8_t va0x01234567 = vld1_s8(a0); a0 += 4;
const int8x8_t va1x01234567 = vld1_s8(a1); a1 += 4;
const int8x8_t va2x01234567 = vld1_s8(a2); a2 += 4;
const int8x8_t va3x01234567 = vld1_s8(a3); a3 += 4;
// Load a 4x8 block of weights.
const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
// Multiply-accumulate: 4x4 * 4x8 --> 4x8.
vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0);
vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0);
vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb0123x0123, va1x01234567, 0);
vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb0123x4567, va1x01234567, 0);
vacc2x0123 = vdotq_lane_s32(vacc2x0123, vb0123x0123, va2x01234567, 0);
vacc2x4567 = vdotq_lane_s32(vacc2x4567, vb0123x4567, va2x01234567, 0);
vacc3x0123 = vdotq_lane_s32(vacc3x0123, vb0123x0123, va3x01234567, 0);
vacc3x4567 = vdotq_lane_s32(vacc3x4567, vb0123x4567, va3x01234567, 0);
}
const int32x4_t vright_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);
vacc3x0123 = vqshlq_s32(vacc3x0123, vright_pre_shift);
vacc3x4567 = vqshlq_s32(vacc3x4567, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
#else
const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
#endif
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->rndnu_neon.output_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->rndnu_neon.output_max);
vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);
vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);
if (nc >= 8) {
// Main case where there the 8 columns fit in the destination.
vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));
// Advance to the next 8 columns.
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
a2 = (const int8_t*) ((uintptr_t) a2 - kc);
a3 = (const int8_t*) ((uintptr_t) a3 - kc);
nc -= 8;
} else {
// Final case where not all of the 8 columns fit in the destination.
if (nc & 4) {
vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
}
if (nc & 2) {
vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
}
if (nc & 1) {
vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
}
nc = 0;
}
} while (nc != 0);
}
| 11,616
| 47.606695
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x8c4s2-minmax-rndnu-neon-mlal.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c4-neon-mull-shuffle.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
// QS8 GEMM microkernel producing a 4-row x 8-column tile of the output.
// Inputs are signed 8-bit; products are widened with VMULL/VMLAL and
// pairwise-accumulated into 32-bit lanes, then requantized with the
// "rndnu" scheme (saturating pre-shift, doubling-multiply, rounding
// post-shift) and clamped to [output_min, output_max].
//
// Naming: 4x8 = MRxNR tile; c4 = 4 input channels are consumed per
// column accumulator before a horizontal add; s2 = the activation vector
// is rotated (vext by 4) once so each loaded 8-byte activation group is
// used against two weight groups; mlal = the K loop is unrolled 2x so a
// vmull can be fused with a vmlal before widening to 32 bits.
//
// Arguments:
//   mr        - number of live rows in this tile, 1..4
//   nc        - number of remaining output columns (decremented by 8 per pass)
//   kc        - reduction (K) size in bytes; rounded up to a multiple of 8 below
//   a/a_stride- activation rows and their byte stride
//   w         - packed weights: per 8-column group, 8 bias values followed by
//               interleaved int8 weight octets (layout produced by the packing
//               code elsewhere in XNNPACK -- see the vld1 walk below)
//   c/cm_stride/cn_stride - output tile base, row stride, column-group stride
//   params    - rndnu_neon requantization parameters
void xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Set up one activation/output pointer pair per row. Rows beyond `mr`
  // alias the previous row, so the kernel always computes 4 rows but the
  // extra stores land on memory that is written anyway (classic XNNPACK
  // row-clamping trick; no branches inside the hot loop).
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }

  // K is processed 8 bytes at a time; the packed A is expected to be padded
  // accordingly (XNN_OOB_READS marks that trailing over-reads are tolerated).
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  do {
    // Load the 8 per-column int32 biases as four 2-lane pairs. Each pair is
    // zero-extended from u32 into a 64-bit lane: during the K loop each
    // vacc?xNM register holds two 64-bit partial sums (columns N and M),
    // which vpadalq_s16 keeps accumulating into; they are folded back to
    // int32 lanes in the horizontal-add stage below.
    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
    // Rows 1..3 start from the same biases.
    int32x4_t vacc1x01 = vacc0x01;
    int32x4_t vacc1x23 = vacc0x23;
    int32x4_t vacc1x45 = vacc0x45;
    int32x4_t vacc1x67 = vacc0x67;
    int32x4_t vacc2x01 = vacc0x01;
    int32x4_t vacc2x23 = vacc0x23;
    int32x4_t vacc2x45 = vacc0x45;
    int32x4_t vacc2x67 = vacc0x67;
    int32x4_t vacc3x01 = vacc0x01;
    int32x4_t vacc3x23 = vacc0x23;
    int32x4_t vacc3x45 = vacc0x45;
    int32x4_t vacc3x67 = vacc0x67;

    size_t k = kc;
    // Main loop: 16 bytes of K per iteration (2x unrolled so each 16-bit
    // product register takes one vmull + one vmlal before being widened).
    while (k >= 16 * sizeof(int8_t)) {
      // Two 8-byte activation groups per row (x0 = first 8 K, x1 = next 8 K).
      int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
      int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
      int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
      int8x8_t va1x1 = vld1_s8(a1); a1 += 8;
      int8x8_t va2x0 = vld1_s8(a2); a2 += 8;
      int8x8_t va2x1 = vld1_s8(a2); a2 += 8;
      int8x8_t va3x0 = vld1_s8(a3); a3 += 8;
      int8x8_t va3x1 = vld1_s8(a3); a3 += 8;

      // Weight octets for the "c0" (unrotated) and "c1" (rotated) phases of
      // the first activation group; the x1 counterparts are loaded lazily
      // right before they are needed to spread out memory traffic.
      const int8x8_t vb01c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vb23c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vb45c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vb67c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vb01c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vb23c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vb45c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vb67c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;

      // Phase c0: multiply the unrotated activations. Pattern per column
      // pair: vmull (x0 group) -> vmlal (x1 group) -> vpadalq into int32.
      int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
      int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
      int16x8_t vprod2x01c0 = vmull_s8(vb01c0x0, va2x0);
      int16x8_t vprod3x01c0 = vmull_s8(vb01c0x0, va3x0);
      const int8x8_t vb01c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
      vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0x1);
      vprod1x01c0 = vmlal_s8(vprod1x01c0, vb01c0x1, va1x1);
      vprod2x01c0 = vmlal_s8(vprod2x01c0, vb01c0x1, va2x1);
      vprod3x01c0 = vmlal_s8(vprod3x01c0, vb01c0x1, va3x1);
      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
      vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
      vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c0);
      int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
      int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
      int16x8_t vprod2x23c0 = vmull_s8(vb23c0x0, va2x0);
      int16x8_t vprod3x23c0 = vmull_s8(vb23c0x0, va3x0);
      const int8x8_t vb23c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
      vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0x1);
      vprod1x23c0 = vmlal_s8(vprod1x23c0, vb23c0x1, va1x1);
      vprod2x23c0 = vmlal_s8(vprod2x23c0, vb23c0x1, va2x1);
      vprod3x23c0 = vmlal_s8(vprod3x23c0, vb23c0x1, va3x1);
      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
      vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
      vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c0);
      int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
      int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
      int16x8_t vprod2x45c0 = vmull_s8(vb45c0x0, va2x0);
      int16x8_t vprod3x45c0 = vmull_s8(vb45c0x0, va3x0);
      const int8x8_t vb45c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
      vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0x1);
      vprod1x45c0 = vmlal_s8(vprod1x45c0, vb45c0x1, va1x1);
      vprod2x45c0 = vmlal_s8(vprod2x45c0, vb45c0x1, va2x1);
      vprod3x45c0 = vmlal_s8(vprod3x45c0, vb45c0x1, va3x1);
      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
      vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
      vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c0);
      int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
      int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
      int16x8_t vprod2x67c0 = vmull_s8(vb67c0x0, va2x0);
      int16x8_t vprod3x67c0 = vmull_s8(vb67c0x0, va3x0);
      const int8x8_t vb67c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
      vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0x1);
      vprod1x67c0 = vmlal_s8(vprod1x67c0, vb67c0x1, va1x1);
      vprod2x67c0 = vmlal_s8(vprod2x67c0, vb67c0x1, va2x1);
      vprod3x67c0 = vmlal_s8(vprod3x67c0, vb67c0x1, va3x1);
      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
      vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
      vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c0);

      // The "s2" shuffle: rotate each activation vector by 4 bytes so the
      // same registers line up with the second ("c1") weight grouping.
      va0x0 = vext_s8(va0x0, va0x0, 4);
      va0x1 = vext_s8(va0x1, va0x1, 4);
      va1x0 = vext_s8(va1x0, va1x0, 4);
      va1x1 = vext_s8(va1x1, va1x1, 4);
      va2x0 = vext_s8(va2x0, va2x0, 4);
      va2x1 = vext_s8(va2x1, va2x1, 4);
      va3x0 = vext_s8(va3x0, va3x0, 4);
      va3x1 = vext_s8(va3x1, va3x1, 4);

      // Phase c1: same multiply/accumulate pattern against the rotated
      // activations and the c1 weight octets.
      int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
      int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
      int16x8_t vprod2x01c1 = vmull_s8(vb01c1x0, va2x0);
      int16x8_t vprod3x01c1 = vmull_s8(vb01c1x0, va3x0);
      const int8x8_t vb01c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
      vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0x1);
      vprod1x01c1 = vmlal_s8(vprod1x01c1, vb01c1x1, va1x1);
      vprod2x01c1 = vmlal_s8(vprod2x01c1, vb01c1x1, va2x1);
      vprod3x01c1 = vmlal_s8(vprod3x01c1, vb01c1x1, va3x1);
      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
      vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
      vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c1);
      int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
      int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
      int16x8_t vprod2x23c1 = vmull_s8(vb23c1x0, va2x0);
      int16x8_t vprod3x23c1 = vmull_s8(vb23c1x0, va3x0);
      const int8x8_t vb23c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
      vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0x1);
      vprod1x23c1 = vmlal_s8(vprod1x23c1, vb23c1x1, va1x1);
      vprod2x23c1 = vmlal_s8(vprod2x23c1, vb23c1x1, va2x1);
      vprod3x23c1 = vmlal_s8(vprod3x23c1, vb23c1x1, va3x1);
      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
      vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
      vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c1);
      int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
      int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
      int16x8_t vprod2x45c1 = vmull_s8(vb45c1x0, va2x0);
      int16x8_t vprod3x45c1 = vmull_s8(vb45c1x0, va3x0);
      const int8x8_t vb45c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
      vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0x1);
      vprod1x45c1 = vmlal_s8(vprod1x45c1, vb45c1x1, va1x1);
      vprod2x45c1 = vmlal_s8(vprod2x45c1, vb45c1x1, va2x1);
      vprod3x45c1 = vmlal_s8(vprod3x45c1, vb45c1x1, va3x1);
      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
      vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
      vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c1);
      int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
      int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
      int16x8_t vprod2x67c1 = vmull_s8(vb67c1x0, va2x0);
      int16x8_t vprod3x67c1 = vmull_s8(vb67c1x0, va3x0);
      const int8x8_t vb67c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
      vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0x1);
      vprod1x67c1 = vmlal_s8(vprod1x67c1, vb67c1x1, va1x1);
      vprod2x67c1 = vmlal_s8(vprod2x67c1, vb67c1x1, va2x1);
      vprod3x67c1 = vmlal_s8(vprod3x67c1, vb67c1x1, va3x1);
      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
      vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
      vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c1);

      k -= 16 * sizeof(int8_t);
    }
    // Remainder: kc was rounded up to a multiple of 8, so at most one more
    // 8-byte group per row remains; same c0/c1 phases, vmull only (no vmlal).
    if (k != 0) {
      int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
      int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
      int8x8_t va2x0 = vld1_s8(a2); a2 += 8;
      int8x8_t va3x0 = vld1_s8(a3); a3 += 8;

      const int8x8_t vb01c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vb23c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vb45c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vb67c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vb01c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vb23c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vb45c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vb67c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;

      int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
      int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
      int16x8_t vprod2x01c0 = vmull_s8(vb01c0x0, va2x0);
      int16x8_t vprod3x01c0 = vmull_s8(vb01c0x0, va3x0);
      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
      vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
      vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c0);
      int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
      int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
      int16x8_t vprod2x23c0 = vmull_s8(vb23c0x0, va2x0);
      int16x8_t vprod3x23c0 = vmull_s8(vb23c0x0, va3x0);
      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
      vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
      vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c0);
      int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
      int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
      int16x8_t vprod2x45c0 = vmull_s8(vb45c0x0, va2x0);
      int16x8_t vprod3x45c0 = vmull_s8(vb45c0x0, va3x0);
      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
      vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
      vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c0);
      int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
      int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
      int16x8_t vprod2x67c0 = vmull_s8(vb67c0x0, va2x0);
      int16x8_t vprod3x67c0 = vmull_s8(vb67c0x0, va3x0);
      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
      vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
      vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c0);

      // Rotate for phase c1, as in the main loop.
      va0x0 = vext_s8(va0x0, va0x0, 4);
      va1x0 = vext_s8(va1x0, va1x0, 4);
      va2x0 = vext_s8(va2x0, va2x0, 4);
      va3x0 = vext_s8(va3x0, va3x0, 4);

      int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
      int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
      int16x8_t vprod2x01c1 = vmull_s8(vb01c1x0, va2x0);
      int16x8_t vprod3x01c1 = vmull_s8(vb01c1x0, va3x0);
      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
      vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
      vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c1);
      int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
      int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
      int16x8_t vprod2x23c1 = vmull_s8(vb23c1x0, va2x0);
      int16x8_t vprod3x23c1 = vmull_s8(vb23c1x0, va3x0);
      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
      vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
      vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c1);
      int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
      int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
      int16x8_t vprod2x45c1 = vmull_s8(vb45c1x0, va2x0);
      int16x8_t vprod3x45c1 = vmull_s8(vb45c1x0, va3x0);
      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
      vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
      vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c1);
      int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
      int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
      int16x8_t vprod2x67c1 = vmull_s8(vb67c1x0, va2x0);
      int16x8_t vprod3x67c1 = vmull_s8(vb67c1x0, va3x0);
      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
      vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
      vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c1);
    }

    // Horizontal reduction: collapse each pair-of-columns accumulator into
    // per-column int32 lanes. AArch64 has a full-width pairwise add; AArch32
    // composes it from 64-bit vpadd halves.
#if XNN_ARCH_ARM64
    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
    int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23);
    int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67);
    int32x4_t vacc2x0123 = vpaddq_s32(vacc2x01, vacc2x23);
    int32x4_t vacc2x4567 = vpaddq_s32(vacc2x45, vacc2x67);
    int32x4_t vacc3x0123 = vpaddq_s32(vacc3x01, vacc3x23);
    int32x4_t vacc3x4567 = vpaddq_s32(vacc3x45, vacc3x67);
#else
    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
    const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01));
    const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23));
    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
    const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45));
    const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67));
    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
    const int32x2_t vsum2x01 = vpadd_s32(vget_low_s32(vacc2x01), vget_high_s32(vacc2x01));
    const int32x2_t vsum2x23 = vpadd_s32(vget_low_s32(vacc2x23), vget_high_s32(vacc2x23));
    int32x4_t vacc2x0123 = vcombine_s32(vsum2x01, vsum2x23);
    const int32x2_t vsum2x45 = vpadd_s32(vget_low_s32(vacc2x45), vget_high_s32(vacc2x45));
    const int32x2_t vsum2x67 = vpadd_s32(vget_low_s32(vacc2x67), vget_high_s32(vacc2x67));
    int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);
    const int32x2_t vsum3x01 = vpadd_s32(vget_low_s32(vacc3x01), vget_high_s32(vacc3x01));
    const int32x2_t vsum3x23 = vpadd_s32(vget_low_s32(vacc3x23), vget_high_s32(vacc3x23));
    int32x4_t vacc3x0123 = vcombine_s32(vsum3x01, vsum3x23);
    const int32x2_t vsum3x45 = vpadd_s32(vget_low_s32(vacc3x45), vget_high_s32(vacc3x45));
    const int32x2_t vsum3x67 = vpadd_s32(vget_low_s32(vacc3x67), vget_high_s32(vacc3x67));
    int32x4_t vacc3x4567 = vcombine_s32(vsum3x45, vsum3x67);
#endif

    // rndnu requantization: saturating left/right pre-shift, doubling
    // high-half multiply, then rounding right post-shift.
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
    vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);
    vacc3x0123 = vqshlq_s32(vacc3x0123, vright_pre_shift);
    vacc3x4567 = vqshlq_s32(vacc3x4567, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);

    // Narrow int32 -> int16 with saturation, add the output zero point,
    // then narrow int16 -> int8 with saturation, packing two rows per
    // 128-bit register.
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
    int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
    int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567);
    int16x8_t vacc3x01234567 = vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567);

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
    vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
    int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
#else
    int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
    int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
    int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567));
    int16x8_t vacc3x01234567 = vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567));

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
    vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
    int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
#endif

    // Clamp to the requested output range.
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
    vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);
    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
    vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);

    if (nc >= 8) {
      // Full 8-column store: low half of each packed register is the even
      // row, high half the odd row.
      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
      vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
      vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));

      // Advance output pointers to the next 8-column group and rewind the
      // activation pointers (they were advanced by kc during the K loop).
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);

      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
      a3 = (const int8_t*) ((uintptr_t) a3 - kc);

      nc -= 8;
    } else {
      // Final case where not all of the 8 columns fit in the destination:
      // store 4, 2, then 1 byte per row according to the low bits of nc,
      // shifting the packed registers down after each partial store.
      if (nc & 4) {
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
        vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
      }
      if (nc & 2) {
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
        vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
      }
      if (nc & 1) {
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
      }

      nc = 0;
    }
  } while (nc != 0);
}
| 22,945
| 49.991111
| 110
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x8c4s2-minmax-rndnu-neon-mull.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c4-neon-mull-shuffle.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const int8_t* a0 = a;
int8_t* c0 = c;
const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
kc = round_up_po2(kc, 8 * sizeof(int8_t));
do {
int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
int32x4_t vacc1x01 = vacc0x01;
int32x4_t vacc1x23 = vacc0x23;
int32x4_t vacc1x45 = vacc0x45;
int32x4_t vacc1x67 = vacc0x67;
int32x4_t vacc2x01 = vacc0x01;
int32x4_t vacc2x23 = vacc0x23;
int32x4_t vacc2x45 = vacc0x45;
int32x4_t vacc2x67 = vacc0x67;
int32x4_t vacc3x01 = vacc0x01;
int32x4_t vacc3x23 = vacc0x23;
int32x4_t vacc3x45 = vacc0x45;
int32x4_t vacc3x67 = vacc0x67;
size_t k = kc;
do {
int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
int8x8_t va2x0 = vld1_s8(a2); a2 += 8;
int8x8_t va3x0 = vld1_s8(a3); a3 += 8;
const int8x8_t vb01c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb23c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb45c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb67c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb01c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb23c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb45c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb67c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
int16x8_t vprod2x01c0 = vmull_s8(vb01c0x0, va2x0);
int16x8_t vprod3x01c0 = vmull_s8(vb01c0x0, va3x0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c0);
int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
int16x8_t vprod2x23c0 = vmull_s8(vb23c0x0, va2x0);
int16x8_t vprod3x23c0 = vmull_s8(vb23c0x0, va3x0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c0);
int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
int16x8_t vprod2x45c0 = vmull_s8(vb45c0x0, va2x0);
int16x8_t vprod3x45c0 = vmull_s8(vb45c0x0, va3x0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c0);
int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
int16x8_t vprod2x67c0 = vmull_s8(vb67c0x0, va2x0);
int16x8_t vprod3x67c0 = vmull_s8(vb67c0x0, va3x0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c0);
va0x0 = vext_s8(va0x0, va0x0, 4);
va1x0 = vext_s8(va1x0, va1x0, 4);
va2x0 = vext_s8(va2x0, va2x0, 4);
va3x0 = vext_s8(va3x0, va3x0, 4);
int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
int16x8_t vprod2x01c1 = vmull_s8(vb01c1x0, va2x0);
int16x8_t vprod3x01c1 = vmull_s8(vb01c1x0, va3x0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c1);
int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
int16x8_t vprod2x23c1 = vmull_s8(vb23c1x0, va2x0);
int16x8_t vprod3x23c1 = vmull_s8(vb23c1x0, va3x0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c1);
int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
int16x8_t vprod2x45c1 = vmull_s8(vb45c1x0, va2x0);
int16x8_t vprod3x45c1 = vmull_s8(vb45c1x0, va3x0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c1);
int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
int16x8_t vprod2x67c1 = vmull_s8(vb67c1x0, va2x0);
int16x8_t vprod3x67c1 = vmull_s8(vb67c1x0, va3x0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c1);
k -= 8 * sizeof(int8_t);
} while (k != 0);
#if XNN_ARCH_ARM64
int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23);
int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67);
int32x4_t vacc2x0123 = vpaddq_s32(vacc2x01, vacc2x23);
int32x4_t vacc2x4567 = vpaddq_s32(vacc2x45, vacc2x67);
int32x4_t vacc3x0123 = vpaddq_s32(vacc3x01, vacc3x23);
int32x4_t vacc3x4567 = vpaddq_s32(vacc3x45, vacc3x67);
#else
const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01));
const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23));
int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45));
const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67));
int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
const int32x2_t vsum2x01 = vpadd_s32(vget_low_s32(vacc2x01), vget_high_s32(vacc2x01));
const int32x2_t vsum2x23 = vpadd_s32(vget_low_s32(vacc2x23), vget_high_s32(vacc2x23));
int32x4_t vacc2x0123 = vcombine_s32(vsum2x01, vsum2x23);
const int32x2_t vsum2x45 = vpadd_s32(vget_low_s32(vacc2x45), vget_high_s32(vacc2x45));
const int32x2_t vsum2x67 = vpadd_s32(vget_low_s32(vacc2x67), vget_high_s32(vacc2x67));
int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);
const int32x2_t vsum3x01 = vpadd_s32(vget_low_s32(vacc3x01), vget_high_s32(vacc3x01));
const int32x2_t vsum3x23 = vpadd_s32(vget_low_s32(vacc3x23), vget_high_s32(vacc3x23));
int32x4_t vacc3x0123 = vcombine_s32(vsum3x01, vsum3x23);
const int32x2_t vsum3x45 = vpadd_s32(vget_low_s32(vacc3x45), vget_high_s32(vacc3x45));
const int32x2_t vsum3x67 = vpadd_s32(vget_low_s32(vacc3x67), vget_high_s32(vacc3x67));
int32x4_t vacc3x4567 = vcombine_s32(vsum3x45, vsum3x67);
#endif
const int32x4_t vright_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);
vacc3x0123 = vqshlq_s32(vacc3x0123, vright_pre_shift);
vacc3x4567 = vqshlq_s32(vacc3x4567, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567);
int16x8_t vacc3x01234567 = vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);
int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567));
int16x8_t vacc3x01234567 = vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);
int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
#endif
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->rndnu_neon.output_min);
vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->rndnu_neon.output_max);
vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
a1 = (const int8_t*) ((uintptr_t) a1 - kc);
a2 = (const int8_t*) ((uintptr_t) a2 - kc);
a3 = (const int8_t*) ((uintptr_t) a3 - kc);
nc -= 8;
} else {
// Final case where not all of the 8 columns fit in the destination.
if (nc & 4) {
vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
}
if (nc & 2) {
vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
}
if (nc & 1) {
vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
}
nc = 0;
}
} while (nc != 0);
}
| 15,647
| 48.362776
| 110
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-4x8c8-minmax-rndnu-neon-mull.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c8-neon-mull.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
// QS8 GEMM microkernel: computes a 4(row) x 8(column) tile of C = A * B on
// signed 8-bit inputs, then requantizes with the "rndnu" scheme
// (saturating pre-shift -> doubling high multiply -> rounding post-shift),
// adds the output zero point, and clamps to [output_min, output_max].
//
// Tile geometry: MR=4, NR=8, KR=8 — each loop iteration consumes 8 K-elements
// per row using VMULL.S8 widening multiplies accumulated via VPADAL.S16.
//
// w points at packed weights: per group of 8 output columns, 8 int32 biases
// followed by the int8 weight data laid out 8 columns x 8 K-elements per step.
// a is the (row-major) activation matrix with row stride a_stride bytes;
// c is the output, row stride cm_stride, column-group stride cn_stride.
// XNN_OOB_READS: the kernel may read (but never write) past buffer ends.
void xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c8__neon_mull(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Round kc up to a multiple of 8: the weights were packed (zero-padded) to
  // KR=8, so the inner loop can always consume full 8-byte groups.
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  const int8_t* a0 = a;
  int8_t* c0 = c;
  // Rows beyond mr alias the previous row: the math is computed for 4 rows
  // unconditionally, but duplicate rows read/write the same memory so no
  // out-of-range row is ever touched.
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }

  // Loop over groups of 8 output columns.
  do {
    // Load the 8 per-column int32 biases into lane 0 of eight accumulators.
    // Each vaccRxC vector holds 4 partial sums for (row R, column C); they are
    // reduced to one int32 per column after the K loop.
    int32x4_t vacc0x0 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x1 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x2 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x3 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x4 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x5 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x6 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x7 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    // Rows 1..3 start from the same biases as row 0.
    int32x4_t vacc1x0 = vacc0x0;
    int32x4_t vacc1x1 = vacc0x1;
    int32x4_t vacc1x2 = vacc0x2;
    int32x4_t vacc1x3 = vacc0x3;
    int32x4_t vacc1x4 = vacc0x4;
    int32x4_t vacc1x5 = vacc0x5;
    int32x4_t vacc1x6 = vacc0x6;
    int32x4_t vacc1x7 = vacc0x7;
    int32x4_t vacc2x0 = vacc0x0;
    int32x4_t vacc2x1 = vacc0x1;
    int32x4_t vacc2x2 = vacc0x2;
    int32x4_t vacc2x3 = vacc0x3;
    int32x4_t vacc2x4 = vacc0x4;
    int32x4_t vacc2x5 = vacc0x5;
    int32x4_t vacc2x6 = vacc0x6;
    int32x4_t vacc2x7 = vacc0x7;
    int32x4_t vacc3x0 = vacc0x0;
    int32x4_t vacc3x1 = vacc0x1;
    int32x4_t vacc3x2 = vacc0x2;
    int32x4_t vacc3x3 = vacc0x3;
    int32x4_t vacc3x4 = vacc0x4;
    int32x4_t vacc3x5 = vacc0x5;
    int32x4_t vacc3x6 = vacc0x6;
    int32x4_t vacc3x7 = vacc0x7;

    size_t k = kc;
    // Handle 8 bytes at a time using MUL.
    // For each of the 8 columns: widen-multiply the 8 int8 weights against the
    // 8 int8 activations of each row (vmull_s8 -> int16x8), then pairwise
    // add-accumulate into the column's int32x4 accumulator (vpadalq_s16).
    while (k != 0) {
      const int8x8_t va0 = vld1_s8(a0); a0 += 8;
      const int8x8_t va1 = vld1_s8(a1); a1 += 8;
      const int8x8_t va2 = vld1_s8(a2); a2 += 8;
      const int8x8_t va3 = vld1_s8(a3); a3 += 8;

      const int8x8_t vb0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int16x8_t vprod0x0 = vmull_s8(vb0, va0);
      const int16x8_t vprod1x0 = vmull_s8(vb0, va1);
      const int16x8_t vprod2x0 = vmull_s8(vb0, va2);
      const int16x8_t vprod3x0 = vmull_s8(vb0, va3);
      vacc0x0 = vpadalq_s16(vacc0x0, vprod0x0);
      vacc1x0 = vpadalq_s16(vacc1x0, vprod1x0);
      vacc2x0 = vpadalq_s16(vacc2x0, vprod2x0);
      vacc3x0 = vpadalq_s16(vacc3x0, vprod3x0);
      const int8x8_t vb1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int16x8_t vprod0x1 = vmull_s8(vb1, va0);
      const int16x8_t vprod1x1 = vmull_s8(vb1, va1);
      const int16x8_t vprod2x1 = vmull_s8(vb1, va2);
      const int16x8_t vprod3x1 = vmull_s8(vb1, va3);
      vacc0x1 = vpadalq_s16(vacc0x1, vprod0x1);
      vacc1x1 = vpadalq_s16(vacc1x1, vprod1x1);
      vacc2x1 = vpadalq_s16(vacc2x1, vprod2x1);
      vacc3x1 = vpadalq_s16(vacc3x1, vprod3x1);
      const int8x8_t vb2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int16x8_t vprod0x2 = vmull_s8(vb2, va0);
      const int16x8_t vprod1x2 = vmull_s8(vb2, va1);
      const int16x8_t vprod2x2 = vmull_s8(vb2, va2);
      const int16x8_t vprod3x2 = vmull_s8(vb2, va3);
      vacc0x2 = vpadalq_s16(vacc0x2, vprod0x2);
      vacc1x2 = vpadalq_s16(vacc1x2, vprod1x2);
      vacc2x2 = vpadalq_s16(vacc2x2, vprod2x2);
      vacc3x2 = vpadalq_s16(vacc3x2, vprod3x2);
      const int8x8_t vb3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int16x8_t vprod0x3 = vmull_s8(vb3, va0);
      const int16x8_t vprod1x3 = vmull_s8(vb3, va1);
      const int16x8_t vprod2x3 = vmull_s8(vb3, va2);
      const int16x8_t vprod3x3 = vmull_s8(vb3, va3);
      vacc0x3 = vpadalq_s16(vacc0x3, vprod0x3);
      vacc1x3 = vpadalq_s16(vacc1x3, vprod1x3);
      vacc2x3 = vpadalq_s16(vacc2x3, vprod2x3);
      vacc3x3 = vpadalq_s16(vacc3x3, vprod3x3);
      const int8x8_t vb4 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int16x8_t vprod0x4 = vmull_s8(vb4, va0);
      const int16x8_t vprod1x4 = vmull_s8(vb4, va1);
      const int16x8_t vprod2x4 = vmull_s8(vb4, va2);
      const int16x8_t vprod3x4 = vmull_s8(vb4, va3);
      vacc0x4 = vpadalq_s16(vacc0x4, vprod0x4);
      vacc1x4 = vpadalq_s16(vacc1x4, vprod1x4);
      vacc2x4 = vpadalq_s16(vacc2x4, vprod2x4);
      vacc3x4 = vpadalq_s16(vacc3x4, vprod3x4);
      const int8x8_t vb5 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int16x8_t vprod0x5 = vmull_s8(vb5, va0);
      const int16x8_t vprod1x5 = vmull_s8(vb5, va1);
      const int16x8_t vprod2x5 = vmull_s8(vb5, va2);
      const int16x8_t vprod3x5 = vmull_s8(vb5, va3);
      vacc0x5 = vpadalq_s16(vacc0x5, vprod0x5);
      vacc1x5 = vpadalq_s16(vacc1x5, vprod1x5);
      vacc2x5 = vpadalq_s16(vacc2x5, vprod2x5);
      vacc3x5 = vpadalq_s16(vacc3x5, vprod3x5);
      const int8x8_t vb6 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int16x8_t vprod0x6 = vmull_s8(vb6, va0);
      const int16x8_t vprod1x6 = vmull_s8(vb6, va1);
      const int16x8_t vprod2x6 = vmull_s8(vb6, va2);
      const int16x8_t vprod3x6 = vmull_s8(vb6, va3);
      vacc0x6 = vpadalq_s16(vacc0x6, vprod0x6);
      vacc1x6 = vpadalq_s16(vacc1x6, vprod1x6);
      vacc2x6 = vpadalq_s16(vacc2x6, vprod2x6);
      vacc3x6 = vpadalq_s16(vacc3x6, vprod3x6);
      const int8x8_t vb7 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int16x8_t vprod0x7 = vmull_s8(vb7, va0);
      const int16x8_t vprod1x7 = vmull_s8(vb7, va1);
      const int16x8_t vprod2x7 = vmull_s8(vb7, va2);
      const int16x8_t vprod3x7 = vmull_s8(vb7, va3);
      vacc0x7 = vpadalq_s16(vacc0x7, vprod0x7);
      vacc1x7 = vpadalq_s16(vacc1x7, vprod1x7);
      vacc2x7 = vpadalq_s16(vacc2x7, vprod2x7);
      vacc3x7 = vpadalq_s16(vacc3x7, vprod3x7);

      k -= 8 * sizeof(int8_t);
    }

    // Horizontal reduction: collapse each column's int32x4 partial sums to a
    // single int32, producing one int32x4 per (row, 4-column half).
#if XNN_ARCH_ARM64
    // AArch64 has full-width pairwise add (vpaddq_s32): two levels of pairwise
    // adds reduce 8 accumulators to 2 vectors per row.
    const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1);
    const int32x4_t vsum0x23 = vpaddq_s32(vacc0x2, vacc0x3);
    const int32x4_t vsum0x45 = vpaddq_s32(vacc0x4, vacc0x5);
    const int32x4_t vsum0x67 = vpaddq_s32(vacc0x6, vacc0x7);
    const int32x4_t vsum1x01 = vpaddq_s32(vacc1x0, vacc1x1);
    const int32x4_t vsum1x23 = vpaddq_s32(vacc1x2, vacc1x3);
    const int32x4_t vsum1x45 = vpaddq_s32(vacc1x4, vacc1x5);
    const int32x4_t vsum1x67 = vpaddq_s32(vacc1x6, vacc1x7);
    const int32x4_t vsum2x01 = vpaddq_s32(vacc2x0, vacc2x1);
    const int32x4_t vsum2x23 = vpaddq_s32(vacc2x2, vacc2x3);
    const int32x4_t vsum2x45 = vpaddq_s32(vacc2x4, vacc2x5);
    const int32x4_t vsum2x67 = vpaddq_s32(vacc2x6, vacc2x7);
    const int32x4_t vsum3x01 = vpaddq_s32(vacc3x0, vacc3x1);
    const int32x4_t vsum3x23 = vpaddq_s32(vacc3x2, vacc3x3);
    const int32x4_t vsum3x45 = vpaddq_s32(vacc3x4, vacc3x5);
    const int32x4_t vsum3x67 = vpaddq_s32(vacc3x6, vacc3x7);
    int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23);
    int32x4_t vacc0x4567 = vpaddq_s32(vsum0x45, vsum0x67);
    int32x4_t vacc1x0123 = vpaddq_s32(vsum1x01, vsum1x23);
    int32x4_t vacc1x4567 = vpaddq_s32(vsum1x45, vsum1x67);
    int32x4_t vacc2x0123 = vpaddq_s32(vsum2x01, vsum2x23);
    int32x4_t vacc2x4567 = vpaddq_s32(vsum2x45, vsum2x67);
    int32x4_t vacc3x0123 = vpaddq_s32(vsum3x01, vsum3x23);
    int32x4_t vacc3x4567 = vpaddq_s32(vsum3x45, vsum3x67);
#else
    // AArch32: only 64-bit pairwise add exists, so first fold high halves onto
    // low halves (vadd_s32), then pairwise-add (vpadd_s32) and recombine.
    const int32x2_t vpsum0x0 = vadd_s32(vget_low_s32(vacc0x0), vget_high_s32(vacc0x0));
    const int32x2_t vpsum0x1 = vadd_s32(vget_low_s32(vacc0x1), vget_high_s32(vacc0x1));
    const int32x2_t vpsum0x2 = vadd_s32(vget_low_s32(vacc0x2), vget_high_s32(vacc0x2));
    const int32x2_t vpsum0x3 = vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3));
    const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1);
    const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3);
    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23 );
    const int32x2_t vpsum0x4 = vadd_s32(vget_low_s32(vacc0x4), vget_high_s32(vacc0x4));
    const int32x2_t vpsum0x5 = vadd_s32(vget_low_s32(vacc0x5), vget_high_s32(vacc0x5));
    const int32x2_t vpsum0x6 = vadd_s32(vget_low_s32(vacc0x6), vget_high_s32(vacc0x6));
    const int32x2_t vpsum0x7 = vadd_s32(vget_low_s32(vacc0x7), vget_high_s32(vacc0x7));
    const int32x2_t vsum0x45 = vpadd_s32(vpsum0x4, vpsum0x5);
    const int32x2_t vsum0x67 = vpadd_s32(vpsum0x6, vpsum0x7);
    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67 );
    const int32x2_t vpsum1x0 = vadd_s32(vget_low_s32(vacc1x0), vget_high_s32(vacc1x0));
    const int32x2_t vpsum1x1 = vadd_s32(vget_low_s32(vacc1x1), vget_high_s32(vacc1x1));
    const int32x2_t vpsum1x2 = vadd_s32(vget_low_s32(vacc1x2), vget_high_s32(vacc1x2));
    const int32x2_t vpsum1x3 = vadd_s32(vget_low_s32(vacc1x3), vget_high_s32(vacc1x3));
    const int32x2_t vsum1x01 = vpadd_s32(vpsum1x0, vpsum1x1);
    const int32x2_t vsum1x23 = vpadd_s32(vpsum1x2, vpsum1x3);
    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23 );
    const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));
    const int32x2_t vpsum1x5 = vadd_s32(vget_low_s32(vacc1x5), vget_high_s32(vacc1x5));
    const int32x2_t vpsum1x6 = vadd_s32(vget_low_s32(vacc1x6), vget_high_s32(vacc1x6));
    const int32x2_t vpsum1x7 = vadd_s32(vget_low_s32(vacc1x7), vget_high_s32(vacc1x7));
    const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);
    const int32x2_t vsum1x67 = vpadd_s32(vpsum1x6, vpsum1x7);
    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67 );
    const int32x2_t vpsum2x0 = vadd_s32(vget_low_s32(vacc2x0), vget_high_s32(vacc2x0));
    const int32x2_t vpsum2x1 = vadd_s32(vget_low_s32(vacc2x1), vget_high_s32(vacc2x1));
    const int32x2_t vpsum2x2 = vadd_s32(vget_low_s32(vacc2x2), vget_high_s32(vacc2x2));
    const int32x2_t vpsum2x3 = vadd_s32(vget_low_s32(vacc2x3), vget_high_s32(vacc2x3));
    const int32x2_t vsum2x01 = vpadd_s32(vpsum2x0, vpsum2x1);
    const int32x2_t vsum2x23 = vpadd_s32(vpsum2x2, vpsum2x3);
    int32x4_t vacc2x0123 = vcombine_s32(vsum2x01, vsum2x23 );
    const int32x2_t vpsum2x4 = vadd_s32(vget_low_s32(vacc2x4), vget_high_s32(vacc2x4));
    const int32x2_t vpsum2x5 = vadd_s32(vget_low_s32(vacc2x5), vget_high_s32(vacc2x5));
    const int32x2_t vpsum2x6 = vadd_s32(vget_low_s32(vacc2x6), vget_high_s32(vacc2x6));
    const int32x2_t vpsum2x7 = vadd_s32(vget_low_s32(vacc2x7), vget_high_s32(vacc2x7));
    const int32x2_t vsum2x45 = vpadd_s32(vpsum2x4, vpsum2x5);
    const int32x2_t vsum2x67 = vpadd_s32(vpsum2x6, vpsum2x7);
    int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67 );
    const int32x2_t vpsum3x0 = vadd_s32(vget_low_s32(vacc3x0), vget_high_s32(vacc3x0));
    const int32x2_t vpsum3x1 = vadd_s32(vget_low_s32(vacc3x1), vget_high_s32(vacc3x1));
    const int32x2_t vpsum3x2 = vadd_s32(vget_low_s32(vacc3x2), vget_high_s32(vacc3x2));
    const int32x2_t vpsum3x3 = vadd_s32(vget_low_s32(vacc3x3), vget_high_s32(vacc3x3));
    const int32x2_t vsum3x01 = vpadd_s32(vpsum3x0, vpsum3x1);
    const int32x2_t vsum3x23 = vpadd_s32(vpsum3x2, vpsum3x3);
    int32x4_t vacc3x0123 = vcombine_s32(vsum3x01, vsum3x23 );
    const int32x2_t vpsum3x4 = vadd_s32(vget_low_s32(vacc3x4), vget_high_s32(vacc3x4));
    const int32x2_t vpsum3x5 = vadd_s32(vget_low_s32(vacc3x5), vget_high_s32(vacc3x5));
    const int32x2_t vpsum3x6 = vadd_s32(vget_low_s32(vacc3x6), vget_high_s32(vacc3x6));
    const int32x2_t vpsum3x7 = vadd_s32(vget_low_s32(vacc3x7), vget_high_s32(vacc3x7));
    const int32x2_t vsum3x45 = vpadd_s32(vpsum3x4, vpsum3x5);
    const int32x2_t vsum3x67 = vpadd_s32(vpsum3x6, vpsum3x7);
    int32x4_t vacc3x4567 = vcombine_s32(vsum3x45, vsum3x67 );
#endif

    // rndnu requantization: saturating left shift (right_pre_shift is
    // presumably non-positive, acting as a pre-scale — confirm against packing
    // code), then doubling high-half multiply, then rounding right shift.
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
    vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);
    vacc3x0123 = vqshlq_s32(vacc3x0123, vright_pre_shift);
    vacc3x4567 = vqshlq_s32(vacc3x4567, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);

    // Narrow int32 -> int16 with saturation, add the output zero point
    // (saturating), then narrow int16 -> int8 with saturation. AArch64 uses
    // the *_high forms to narrow straight into a 128-bit register.
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
    int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
    int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567);
    int16x8_t vacc3x01234567 = vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567);

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
    vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
    int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
#else
    int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
    int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
    int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567));
    int16x8_t vacc3x01234567 = vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567));

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
    vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
    int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
#endif

    // Clamp to the requested output range. Rows 0/1 share one 16-byte vector
    // (low/high halves), as do rows 2/3.
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
    vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);

    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
    vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);

    if (nc >= 8) {
      // Full 8-column store; rewind the a pointers by kc for the next column
      // group (the same activations are reused against new weights).
      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
      vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
      vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);

      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
      a3 = (const int8_t*) ((uintptr_t) a3 - kc);

      nc -= 8;
    } else {
      // Final case where not all of the 8 columns fit in the destination.
      // Store 4/2/1 bytes at a time via reinterpreted lane stores; vextq
      // rotates the already-stored bytes out so lane indices stay fixed.
      if (nc & 4) {
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
        vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
      }
      if (nc & 2) {
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
        vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
      }
      if (nc & 1) {
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
      }

      nc = 0;
    }
  } while (nc != 0);
}
| 19,665
| 50.889182
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-6x8c4-minmax-rndnu-neondot.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c4-neondot.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
// QS8 GEMM microkernel: computes a 6-row by 8-column tile of C = A * B for
// int8 inputs, using the ARMv8.2 dot-product (SDOT) extension via
// vdotq_lane_s32. Accumulation is in int32; the result is requantized with
// the "rndnu" scheme (saturating pre-shift, doubling-high multiply, rounding
// post-shift) and clamped to [output_min, output_max].
//
// Standard XNNPACK GEMM microkernel contract (assumed from the visible code;
// packed-weight layout is produced by a packing routine elsewhere — confirm
// against the c4 packing code):
//   mr        - number of rows of A/C to process (1..6)
//   nc        - number of output columns remaining
//   kc        - K elements per row, in bytes (int8); rounded up to 4 below
//   a/a_stride- activation matrix and its row stride in bytes
//   w         - packed weights: per group of 8 columns, 8 int32 biases
//               followed by int8 weights interleaved 4 K-elements per column
//   c/cm_stride/cn_stride - output pointer, row stride, column-group stride
//   params    - requantization parameters (rndnu_neon variant)
//
// XNN_OOB_READS: the kernel may read up to a few bytes past the logical end
// of its inputs (see the k-remainder loads below); callers must guarantee
// such reads are safe.
void xnn_qs8_gemm_minmax_rndnu_ukernel_6x8c4__neondot(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 6);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // Weights are packed in groups of 4 K-elements, so process kc in multiples of 4.
  kc = round_up_po2(kc, 4 * sizeof(int8_t));
  // Set up per-row input/output pointers. When mr is smaller than 6, rows
  // beyond mr alias the previous row: the redundant loads/stores then target
  // valid (already-owned) memory, avoiding branches in the main loop.
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const int8_t* a4 = (const int8_t*) ((uintptr_t) a3 + a_stride);
  int8_t* c4 = (int8_t*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const int8_t* a5 = (const int8_t*) ((uintptr_t) a4 + a_stride);
  int8_t* c5 = (int8_t*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 6) {
    a5 = a4;
    c5 = c4;
  }

  // Loop over groups of 8 columns.
  do {
    // Initialize accumulators with bias. 8 bias values are loaded from the
    // weight matrix, at the start of the group of 8 columns.
    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
    int32x4_t vacc1x0123 = vacc0x0123;
    int32x4_t vacc1x4567 = vacc0x4567;
    int32x4_t vacc2x0123 = vacc0x0123;
    int32x4_t vacc2x4567 = vacc0x4567;
    int32x4_t vacc3x0123 = vacc0x0123;
    int32x4_t vacc3x4567 = vacc0x4567;
    int32x4_t vacc4x0123 = vacc0x0123;
    int32x4_t vacc4x4567 = vacc0x4567;
    int32x4_t vacc5x0123 = vacc0x0123;
    int32x4_t vacc5x4567 = vacc0x4567;

    // Inner accumulation loop along the K dimension.
    size_t k = kc;
    // 2x partial unrolled loop to load 8 bytes at a time.
    while (k >= 8 * sizeof(int8_t)) {
      // Load a 6x8 block of activations.
      const int8x8_t va0x01234567 = vld1_s8(a0); a0 += 8;
      const int8x8_t va1x01234567 = vld1_s8(a1); a1 += 8;
      const int8x8_t va2x01234567 = vld1_s8(a2); a2 += 8;
      const int8x8_t va3x01234567 = vld1_s8(a3); a3 += 8;
      const int8x8_t va4x01234567 = vld1_s8(a4); a4 += 8;
      const int8x8_t va5x01234567 = vld1_s8(a5); a5 += 8;
      // Load a 8x8 block of weights.
      const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
      const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
      const int8x16_t vb4567x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
      const int8x16_t vb4567x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
      // Multiply-accumulate: 6x8 * 8x8 --> 6x8. Each vdotq_lane_s32 consumes
      // 4 consecutive K-elements (one 32-bit lane of the activation vector).
      vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0);
      vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0);
      vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb0123x0123, va1x01234567, 0);
      vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb0123x4567, va1x01234567, 0);
      vacc2x0123 = vdotq_lane_s32(vacc2x0123, vb0123x0123, va2x01234567, 0);
      vacc2x4567 = vdotq_lane_s32(vacc2x4567, vb0123x4567, va2x01234567, 0);
      vacc3x0123 = vdotq_lane_s32(vacc3x0123, vb0123x0123, va3x01234567, 0);
      vacc3x4567 = vdotq_lane_s32(vacc3x4567, vb0123x4567, va3x01234567, 0);
      vacc4x0123 = vdotq_lane_s32(vacc4x0123, vb0123x0123, va4x01234567, 0);
      vacc4x4567 = vdotq_lane_s32(vacc4x4567, vb0123x4567, va4x01234567, 0);
      vacc5x0123 = vdotq_lane_s32(vacc5x0123, vb0123x0123, va5x01234567, 0);
      vacc5x4567 = vdotq_lane_s32(vacc5x4567, vb0123x4567, va5x01234567, 0);
      vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb4567x0123, va0x01234567, 1);
      vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb4567x4567, va0x01234567, 1);
      vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb4567x0123, va1x01234567, 1);
      vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb4567x4567, va1x01234567, 1);
      vacc2x0123 = vdotq_lane_s32(vacc2x0123, vb4567x0123, va2x01234567, 1);
      vacc2x4567 = vdotq_lane_s32(vacc2x4567, vb4567x4567, va2x01234567, 1);
      vacc3x0123 = vdotq_lane_s32(vacc3x0123, vb4567x0123, va3x01234567, 1);
      vacc3x4567 = vdotq_lane_s32(vacc3x4567, vb4567x4567, va3x01234567, 1);
      vacc4x0123 = vdotq_lane_s32(vacc4x0123, vb4567x0123, va4x01234567, 1);
      vacc4x4567 = vdotq_lane_s32(vacc4x4567, vb4567x4567, va4x01234567, 1);
      vacc5x0123 = vdotq_lane_s32(vacc5x0123, vb4567x0123, va5x01234567, 1);
      vacc5x4567 = vdotq_lane_s32(vacc5x4567, vb4567x4567, va5x01234567, 1);
      k -= 8 * sizeof(int8_t);
    }
    // Handle up to 4 final positions of `k`
    if XNN_UNLIKELY(k != 0) {
      // Load a 6x4 block of activations. NOTE: vld1_s8 reads 8 bytes even
      // though only 4 are consumed; this over-read is covered by the
      // XNN_OOB_READS contract of the kernel.
      const int8x8_t va0x01234567 = vld1_s8(a0); a0 += 4;
      const int8x8_t va1x01234567 = vld1_s8(a1); a1 += 4;
      const int8x8_t va2x01234567 = vld1_s8(a2); a2 += 4;
      const int8x8_t va3x01234567 = vld1_s8(a3); a3 += 4;
      const int8x8_t va4x01234567 = vld1_s8(a4); a4 += 4;
      const int8x8_t va5x01234567 = vld1_s8(a5); a5 += 4;
      // Load a 4x8 block of weights.
      const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
      const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
      // Multiply-accumulate: 6x4 * 4x8 --> 6x8.
      vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0);
      vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0);
      vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb0123x0123, va1x01234567, 0);
      vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb0123x4567, va1x01234567, 0);
      vacc2x0123 = vdotq_lane_s32(vacc2x0123, vb0123x0123, va2x01234567, 0);
      vacc2x4567 = vdotq_lane_s32(vacc2x4567, vb0123x4567, va2x01234567, 0);
      vacc3x0123 = vdotq_lane_s32(vacc3x0123, vb0123x0123, va3x01234567, 0);
      vacc3x4567 = vdotq_lane_s32(vacc3x4567, vb0123x4567, va3x01234567, 0);
      vacc4x0123 = vdotq_lane_s32(vacc4x0123, vb0123x0123, va4x01234567, 0);
      vacc4x4567 = vdotq_lane_s32(vacc4x4567, vb0123x4567, va4x01234567, 0);
      vacc5x0123 = vdotq_lane_s32(vacc5x0123, vb0123x0123, va5x01234567, 0);
      vacc5x4567 = vdotq_lane_s32(vacc5x4567, vb0123x4567, va5x01234567, 0);
    }

    // Requantize the int32 accumulators (rndnu scheme):
    // saturating left pre-shift, saturating doubling-high multiply, then
    // rounding right post-shift (shifts are encoded as signed shift amounts).
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
    vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);
    vacc3x0123 = vqshlq_s32(vacc3x0123, vright_pre_shift);
    vacc3x4567 = vqshlq_s32(vacc3x4567, vright_pre_shift);
    vacc4x0123 = vqshlq_s32(vacc4x0123, vright_pre_shift);
    vacc4x4567 = vqshlq_s32(vacc4x4567, vright_pre_shift);
    vacc5x0123 = vqshlq_s32(vacc5x0123, vright_pre_shift);
    vacc5x4567 = vqshlq_s32(vacc5x4567, vright_pre_shift);
    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
    vacc4x0123 = vqdmulhq_s32(vacc4x0123, vmultiplier);
    vacc4x4567 = vqdmulhq_s32(vacc4x4567, vmultiplier);
    vacc5x0123 = vqdmulhq_s32(vacc5x0123, vmultiplier);
    vacc5x4567 = vqdmulhq_s32(vacc5x4567, vmultiplier);
    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
    vacc4x0123 = vrshlq_s32(vacc4x0123, vright_post_shift);
    vacc4x4567 = vrshlq_s32(vacc4x4567, vright_post_shift);
    vacc5x0123 = vrshlq_s32(vacc5x0123, vright_post_shift);
    vacc5x4567 = vrshlq_s32(vacc5x4567, vright_post_shift);

    // Narrow int32 -> int16 with saturation, add the output zero point,
    // then narrow int16 -> int8 with saturation. AArch64 uses the fused
    // *_high narrowing forms; AArch32 falls back to vcombine.
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
    const int16x8_t vacc4x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc4x0123), vacc4x4567), voutput_zero_point);
    const int16x8_t vacc5x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc5x0123), vacc5x4567), voutput_zero_point);
    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
    int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
    int8x16_t vout4x01234567_5x01234567 = vqmovn_high_s16(vqmovn_s16(vacc4x01234567), vacc5x01234567);
#else
    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
    const int16x8_t vacc4x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc4x0123), vqmovn_s32(vacc4x4567)), voutput_zero_point);
    const int16x8_t vacc5x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc5x0123), vqmovn_s32(vacc5x4567)), voutput_zero_point);
    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
    int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
    int8x16_t vout4x01234567_5x01234567 = vcombine_s8(vqmovn_s16(vacc4x01234567), vqmovn_s16(vacc5x01234567));
#endif
    // Clamp to the requested output range.
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
    vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);
    vout4x01234567_5x01234567 = vmaxq_s8(vout4x01234567_5x01234567, voutput_min);
    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
    vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);
    vout4x01234567_5x01234567 = vminq_s8(vout4x01234567_5x01234567, voutput_max);
    if (nc >= 8) {
      // Main case where the 8 columns fit in the destination.
      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
      vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
      vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));
      vst1_s8(c4 + 0, vget_low_s8(vout4x01234567_5x01234567));
      vst1_s8(c5 + 0, vget_high_s8(vout4x01234567_5x01234567));
      // Advance to the next 8 columns.
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      c4 = (int8_t*) ((uintptr_t) c4 + cn_stride);
      c5 = (int8_t*) ((uintptr_t) c5 + cn_stride);
      // Rewind the activation pointers to the start of each row for the
      // next column group.
      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
      a3 = (const int8_t*) ((uintptr_t) a3 - kc);
      a4 = (const int8_t*) ((uintptr_t) a4 - kc);
      a5 = (const int8_t*) ((uintptr_t) a5 - kc);
      nc -= 8;
    } else {
      // Final case where not all of the 8 columns fit in the destination.
      // Store 4/2/1 lanes at a time, rotating the vectors (vextq_s8) so the
      // next store reads from lane 0 again.
      if (nc & 4) {
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
        vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
        vst1q_lane_u32((void*) c4, vreinterpretq_u32_s8(vout4x01234567_5x01234567), 0); c4 += 4;
        vst1q_lane_u32((void*) c5, vreinterpretq_u32_s8(vout4x01234567_5x01234567), 2); c5 += 4;
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
        vout4x01234567_5x01234567 = vextq_s8(vout4x01234567_5x01234567, vout4x01234567_5x01234567, 4);
      }
      if (nc & 2) {
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
        vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
        vst1q_lane_u16((void*) c4, vreinterpretq_u16_s8(vout4x01234567_5x01234567), 0); c4 += 2;
        vst1q_lane_u16((void*) c5, vreinterpretq_u16_s8(vout4x01234567_5x01234567), 4); c5 += 2;
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
        vout4x01234567_5x01234567 = vextq_s8(vout4x01234567_5x01234567, vout4x01234567_5x01234567, 2);
      }
      if (nc & 1) {
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
        vst1q_lane_s8(c4, vout4x01234567_5x01234567, 0);
        vst1q_lane_s8(c5, vout4x01234567_5x01234567, 8);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 15,930
| 51.232787
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-8x8c4-minmax-rndnu-neondot.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c4-neondot.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
// QS8 GEMM microkernel: computes an 8-row by 8-column tile of C = A * B for
// int8 inputs, using the ARMv8.2 dot-product (SDOT) extension via
// vdotq_lane_s32. Accumulation is in int32; the result is requantized with
// the "rndnu" scheme (saturating pre-shift, doubling-high multiply, rounding
// post-shift) and clamped to [output_min, output_max].
//
// Standard XNNPACK GEMM microkernel contract (assumed from the visible code;
// packed-weight layout is produced by a packing routine elsewhere — confirm
// against the c4 packing code):
//   mr        - number of rows of A/C to process (1..8)
//   nc        - number of output columns remaining
//   kc        - K elements per row, in bytes (int8); rounded up to 4 below
//   a/a_stride- activation matrix and its row stride in bytes
//   w         - packed weights: per group of 8 columns, 8 int32 biases
//               followed by int8 weights interleaved 4 K-elements per column
//   c/cm_stride/cn_stride - output pointer, row stride, column-group stride
//   params    - requantization parameters (rndnu_neon variant)
//
// XNN_OOB_READS: the kernel may read up to a few bytes past the logical end
// of its inputs (see the k-remainder loads below); callers must guarantee
// such reads are safe.
void xnn_qs8_gemm_minmax_rndnu_ukernel_8x8c4__neondot(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 8);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // Weights are packed in groups of 4 K-elements, so process kc in multiples of 4.
  kc = round_up_po2(kc, 4 * sizeof(int8_t));
  // Set up per-row input/output pointers. When mr is smaller than 8, rows
  // beyond mr alias the previous row: the redundant loads/stores then target
  // valid (already-owned) memory, avoiding branches in the main loop.
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const int8_t* a4 = (const int8_t*) ((uintptr_t) a3 + a_stride);
  int8_t* c4 = (int8_t*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const int8_t* a5 = (const int8_t*) ((uintptr_t) a4 + a_stride);
  int8_t* c5 = (int8_t*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 6) {
    a5 = a4;
    c5 = c4;
  }
  const int8_t* a6 = (const int8_t*) ((uintptr_t) a5 + a_stride);
  int8_t* c6 = (int8_t*) ((uintptr_t) c5 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 6) {
    a6 = a5;
    c6 = c5;
  }
  const int8_t* a7 = (const int8_t*) ((uintptr_t) a6 + a_stride);
  int8_t* c7 = (int8_t*) ((uintptr_t) c6 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 8) {
    a7 = a6;
    c7 = c6;
  }

  // Loop over groups of 8 columns.
  do {
    // Initialize accumulators with bias. 8 bias values are loaded from the
    // weight matrix, at the start of the group of 8 columns.
    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
    int32x4_t vacc1x0123 = vacc0x0123;
    int32x4_t vacc1x4567 = vacc0x4567;
    int32x4_t vacc2x0123 = vacc0x0123;
    int32x4_t vacc2x4567 = vacc0x4567;
    int32x4_t vacc3x0123 = vacc0x0123;
    int32x4_t vacc3x4567 = vacc0x4567;
    int32x4_t vacc4x0123 = vacc0x0123;
    int32x4_t vacc4x4567 = vacc0x4567;
    int32x4_t vacc5x0123 = vacc0x0123;
    int32x4_t vacc5x4567 = vacc0x4567;
    int32x4_t vacc6x0123 = vacc0x0123;
    int32x4_t vacc6x4567 = vacc0x4567;
    int32x4_t vacc7x0123 = vacc0x0123;
    int32x4_t vacc7x4567 = vacc0x4567;

    // Inner accumulation loop along the K dimension.
    size_t k = kc;
    // 2x partial unrolled loop to load 8 bytes at a time.
    while (k >= 8 * sizeof(int8_t)) {
      // Load a 8x8 block of activations.
      const int8x8_t va0x01234567 = vld1_s8(a0); a0 += 8;
      const int8x8_t va1x01234567 = vld1_s8(a1); a1 += 8;
      const int8x8_t va2x01234567 = vld1_s8(a2); a2 += 8;
      const int8x8_t va3x01234567 = vld1_s8(a3); a3 += 8;
      const int8x8_t va4x01234567 = vld1_s8(a4); a4 += 8;
      const int8x8_t va5x01234567 = vld1_s8(a5); a5 += 8;
      const int8x8_t va6x01234567 = vld1_s8(a6); a6 += 8;
      const int8x8_t va7x01234567 = vld1_s8(a7); a7 += 8;
      // Load a 8x8 block of weights.
      const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
      const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
      const int8x16_t vb4567x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
      const int8x16_t vb4567x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
      // Multiply-accumulate: 8x8 * 8x8 --> 8x8. Each vdotq_lane_s32 consumes
      // 4 consecutive K-elements (one 32-bit lane of the activation vector).
      vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0);
      vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0);
      vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb0123x0123, va1x01234567, 0);
      vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb0123x4567, va1x01234567, 0);
      vacc2x0123 = vdotq_lane_s32(vacc2x0123, vb0123x0123, va2x01234567, 0);
      vacc2x4567 = vdotq_lane_s32(vacc2x4567, vb0123x4567, va2x01234567, 0);
      vacc3x0123 = vdotq_lane_s32(vacc3x0123, vb0123x0123, va3x01234567, 0);
      vacc3x4567 = vdotq_lane_s32(vacc3x4567, vb0123x4567, va3x01234567, 0);
      vacc4x0123 = vdotq_lane_s32(vacc4x0123, vb0123x0123, va4x01234567, 0);
      vacc4x4567 = vdotq_lane_s32(vacc4x4567, vb0123x4567, va4x01234567, 0);
      vacc5x0123 = vdotq_lane_s32(vacc5x0123, vb0123x0123, va5x01234567, 0);
      vacc5x4567 = vdotq_lane_s32(vacc5x4567, vb0123x4567, va5x01234567, 0);
      vacc6x0123 = vdotq_lane_s32(vacc6x0123, vb0123x0123, va6x01234567, 0);
      vacc6x4567 = vdotq_lane_s32(vacc6x4567, vb0123x4567, va6x01234567, 0);
      vacc7x0123 = vdotq_lane_s32(vacc7x0123, vb0123x0123, va7x01234567, 0);
      vacc7x4567 = vdotq_lane_s32(vacc7x4567, vb0123x4567, va7x01234567, 0);
      vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb4567x0123, va0x01234567, 1);
      vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb4567x4567, va0x01234567, 1);
      vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb4567x0123, va1x01234567, 1);
      vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb4567x4567, va1x01234567, 1);
      vacc2x0123 = vdotq_lane_s32(vacc2x0123, vb4567x0123, va2x01234567, 1);
      vacc2x4567 = vdotq_lane_s32(vacc2x4567, vb4567x4567, va2x01234567, 1);
      vacc3x0123 = vdotq_lane_s32(vacc3x0123, vb4567x0123, va3x01234567, 1);
      vacc3x4567 = vdotq_lane_s32(vacc3x4567, vb4567x4567, va3x01234567, 1);
      vacc4x0123 = vdotq_lane_s32(vacc4x0123, vb4567x0123, va4x01234567, 1);
      vacc4x4567 = vdotq_lane_s32(vacc4x4567, vb4567x4567, va4x01234567, 1);
      vacc5x0123 = vdotq_lane_s32(vacc5x0123, vb4567x0123, va5x01234567, 1);
      vacc5x4567 = vdotq_lane_s32(vacc5x4567, vb4567x4567, va5x01234567, 1);
      vacc6x0123 = vdotq_lane_s32(vacc6x0123, vb4567x0123, va6x01234567, 1);
      vacc6x4567 = vdotq_lane_s32(vacc6x4567, vb4567x4567, va6x01234567, 1);
      vacc7x0123 = vdotq_lane_s32(vacc7x0123, vb4567x0123, va7x01234567, 1);
      vacc7x4567 = vdotq_lane_s32(vacc7x4567, vb4567x4567, va7x01234567, 1);
      k -= 8 * sizeof(int8_t);
    }
    // Handle up to 4 final positions of `k`
    if XNN_UNLIKELY(k != 0) {
      // Load a 8x4 block of activations. NOTE: vld1_s8 reads 8 bytes even
      // though only 4 are consumed; this over-read is covered by the
      // XNN_OOB_READS contract of the kernel.
      const int8x8_t va0x01234567 = vld1_s8(a0); a0 += 4;
      const int8x8_t va1x01234567 = vld1_s8(a1); a1 += 4;
      const int8x8_t va2x01234567 = vld1_s8(a2); a2 += 4;
      const int8x8_t va3x01234567 = vld1_s8(a3); a3 += 4;
      const int8x8_t va4x01234567 = vld1_s8(a4); a4 += 4;
      const int8x8_t va5x01234567 = vld1_s8(a5); a5 += 4;
      const int8x8_t va6x01234567 = vld1_s8(a6); a6 += 4;
      const int8x8_t va7x01234567 = vld1_s8(a7); a7 += 4;
      // Load a 4x8 block of weights.
      const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
      const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
      // Multiply-accumulate: 8x4 * 4x8 --> 8x8.
      vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0);
      vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0);
      vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb0123x0123, va1x01234567, 0);
      vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb0123x4567, va1x01234567, 0);
      vacc2x0123 = vdotq_lane_s32(vacc2x0123, vb0123x0123, va2x01234567, 0);
      vacc2x4567 = vdotq_lane_s32(vacc2x4567, vb0123x4567, va2x01234567, 0);
      vacc3x0123 = vdotq_lane_s32(vacc3x0123, vb0123x0123, va3x01234567, 0);
      vacc3x4567 = vdotq_lane_s32(vacc3x4567, vb0123x4567, va3x01234567, 0);
      vacc4x0123 = vdotq_lane_s32(vacc4x0123, vb0123x0123, va4x01234567, 0);
      vacc4x4567 = vdotq_lane_s32(vacc4x4567, vb0123x4567, va4x01234567, 0);
      vacc5x0123 = vdotq_lane_s32(vacc5x0123, vb0123x0123, va5x01234567, 0);
      vacc5x4567 = vdotq_lane_s32(vacc5x4567, vb0123x4567, va5x01234567, 0);
      vacc6x0123 = vdotq_lane_s32(vacc6x0123, vb0123x0123, va6x01234567, 0);
      vacc6x4567 = vdotq_lane_s32(vacc6x4567, vb0123x4567, va6x01234567, 0);
      vacc7x0123 = vdotq_lane_s32(vacc7x0123, vb0123x0123, va7x01234567, 0);
      vacc7x4567 = vdotq_lane_s32(vacc7x4567, vb0123x4567, va7x01234567, 0);
    }

    // Requantize the int32 accumulators (rndnu scheme):
    // saturating left pre-shift, saturating doubling-high multiply, then
    // rounding right post-shift (shifts are encoded as signed shift amounts).
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
    vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);
    vacc3x0123 = vqshlq_s32(vacc3x0123, vright_pre_shift);
    vacc3x4567 = vqshlq_s32(vacc3x4567, vright_pre_shift);
    vacc4x0123 = vqshlq_s32(vacc4x0123, vright_pre_shift);
    vacc4x4567 = vqshlq_s32(vacc4x4567, vright_pre_shift);
    vacc5x0123 = vqshlq_s32(vacc5x0123, vright_pre_shift);
    vacc5x4567 = vqshlq_s32(vacc5x4567, vright_pre_shift);
    vacc6x0123 = vqshlq_s32(vacc6x0123, vright_pre_shift);
    vacc6x4567 = vqshlq_s32(vacc6x4567, vright_pre_shift);
    vacc7x0123 = vqshlq_s32(vacc7x0123, vright_pre_shift);
    vacc7x4567 = vqshlq_s32(vacc7x4567, vright_pre_shift);
    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
    vacc4x0123 = vqdmulhq_s32(vacc4x0123, vmultiplier);
    vacc4x4567 = vqdmulhq_s32(vacc4x4567, vmultiplier);
    vacc5x0123 = vqdmulhq_s32(vacc5x0123, vmultiplier);
    vacc5x4567 = vqdmulhq_s32(vacc5x4567, vmultiplier);
    vacc6x0123 = vqdmulhq_s32(vacc6x0123, vmultiplier);
    vacc6x4567 = vqdmulhq_s32(vacc6x4567, vmultiplier);
    vacc7x0123 = vqdmulhq_s32(vacc7x0123, vmultiplier);
    vacc7x4567 = vqdmulhq_s32(vacc7x4567, vmultiplier);
    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
    vacc4x0123 = vrshlq_s32(vacc4x0123, vright_post_shift);
    vacc4x4567 = vrshlq_s32(vacc4x4567, vright_post_shift);
    vacc5x0123 = vrshlq_s32(vacc5x0123, vright_post_shift);
    vacc5x4567 = vrshlq_s32(vacc5x4567, vright_post_shift);
    vacc6x0123 = vrshlq_s32(vacc6x0123, vright_post_shift);
    vacc6x4567 = vrshlq_s32(vacc6x4567, vright_post_shift);
    vacc7x0123 = vrshlq_s32(vacc7x0123, vright_post_shift);
    vacc7x4567 = vrshlq_s32(vacc7x4567, vright_post_shift);

    // Narrow int32 -> int16 with saturation, add the output zero point,
    // then narrow int16 -> int8 with saturation. AArch64 uses the fused
    // *_high narrowing forms; AArch32 falls back to vcombine.
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
    const int16x8_t vacc4x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc4x0123), vacc4x4567), voutput_zero_point);
    const int16x8_t vacc5x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc5x0123), vacc5x4567), voutput_zero_point);
    const int16x8_t vacc6x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc6x0123), vacc6x4567), voutput_zero_point);
    const int16x8_t vacc7x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc7x0123), vacc7x4567), voutput_zero_point);
    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
    int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
    int8x16_t vout4x01234567_5x01234567 = vqmovn_high_s16(vqmovn_s16(vacc4x01234567), vacc5x01234567);
    int8x16_t vout6x01234567_7x01234567 = vqmovn_high_s16(vqmovn_s16(vacc6x01234567), vacc7x01234567);
#else
    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
    const int16x8_t vacc4x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc4x0123), vqmovn_s32(vacc4x4567)), voutput_zero_point);
    const int16x8_t vacc5x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc5x0123), vqmovn_s32(vacc5x4567)), voutput_zero_point);
    const int16x8_t vacc6x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc6x0123), vqmovn_s32(vacc6x4567)), voutput_zero_point);
    const int16x8_t vacc7x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc7x0123), vqmovn_s32(vacc7x4567)), voutput_zero_point);
    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
    int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
    int8x16_t vout4x01234567_5x01234567 = vcombine_s8(vqmovn_s16(vacc4x01234567), vqmovn_s16(vacc5x01234567));
    int8x16_t vout6x01234567_7x01234567 = vcombine_s8(vqmovn_s16(vacc6x01234567), vqmovn_s16(vacc7x01234567));
#endif
    // Clamp to the requested output range.
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
    vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);
    vout4x01234567_5x01234567 = vmaxq_s8(vout4x01234567_5x01234567, voutput_min);
    vout6x01234567_7x01234567 = vmaxq_s8(vout6x01234567_7x01234567, voutput_min);
    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
    vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);
    vout4x01234567_5x01234567 = vminq_s8(vout4x01234567_5x01234567, voutput_max);
    vout6x01234567_7x01234567 = vminq_s8(vout6x01234567_7x01234567, voutput_max);
    if (nc >= 8) {
      // Main case where the 8 columns fit in the destination.
      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
      vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
      vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));
      vst1_s8(c4 + 0, vget_low_s8(vout4x01234567_5x01234567));
      vst1_s8(c5 + 0, vget_high_s8(vout4x01234567_5x01234567));
      vst1_s8(c6 + 0, vget_low_s8(vout6x01234567_7x01234567));
      vst1_s8(c7 + 0, vget_high_s8(vout6x01234567_7x01234567));
      // Advance to the next 8 columns.
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      c4 = (int8_t*) ((uintptr_t) c4 + cn_stride);
      c5 = (int8_t*) ((uintptr_t) c5 + cn_stride);
      c6 = (int8_t*) ((uintptr_t) c6 + cn_stride);
      c7 = (int8_t*) ((uintptr_t) c7 + cn_stride);
      // Rewind the activation pointers to the start of each row for the
      // next column group.
      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
      a3 = (const int8_t*) ((uintptr_t) a3 - kc);
      a4 = (const int8_t*) ((uintptr_t) a4 - kc);
      a5 = (const int8_t*) ((uintptr_t) a5 - kc);
      a6 = (const int8_t*) ((uintptr_t) a6 - kc);
      a7 = (const int8_t*) ((uintptr_t) a7 - kc);
      nc -= 8;
    } else {
      // Final case where not all of the 8 columns fit in the destination.
      // Store 4/2/1 lanes at a time, rotating the vectors (vextq_s8) so the
      // next store reads from lane 0 again.
      if (nc & 4) {
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
        vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
        vst1q_lane_u32((void*) c4, vreinterpretq_u32_s8(vout4x01234567_5x01234567), 0); c4 += 4;
        vst1q_lane_u32((void*) c5, vreinterpretq_u32_s8(vout4x01234567_5x01234567), 2); c5 += 4;
        vst1q_lane_u32((void*) c6, vreinterpretq_u32_s8(vout6x01234567_7x01234567), 0); c6 += 4;
        vst1q_lane_u32((void*) c7, vreinterpretq_u32_s8(vout6x01234567_7x01234567), 2); c7 += 4;
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
        vout4x01234567_5x01234567 = vextq_s8(vout4x01234567_5x01234567, vout4x01234567_5x01234567, 4);
        vout6x01234567_7x01234567 = vextq_s8(vout6x01234567_7x01234567, vout6x01234567_7x01234567, 4);
      }
      if (nc & 2) {
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
        vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
        vst1q_lane_u16((void*) c4, vreinterpretq_u16_s8(vout4x01234567_5x01234567), 0); c4 += 2;
        vst1q_lane_u16((void*) c5, vreinterpretq_u16_s8(vout4x01234567_5x01234567), 4); c5 += 2;
        vst1q_lane_u16((void*) c6, vreinterpretq_u16_s8(vout6x01234567_7x01234567), 0); c6 += 2;
        vst1q_lane_u16((void*) c7, vreinterpretq_u16_s8(vout6x01234567_7x01234567), 4); c7 += 2;
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
        vout4x01234567_5x01234567 = vextq_s8(vout4x01234567_5x01234567, vout4x01234567_5x01234567, 2);
        vout6x01234567_7x01234567 = vextq_s8(vout6x01234567_7x01234567, vout6x01234567_7x01234567, 2);
      }
      if (nc & 1) {
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
        vst1q_lane_s8(c4, vout4x01234567_5x01234567, 0);
        vst1q_lane_s8(c5, vout4x01234567_5x01234567, 8);
        vst1q_lane_s8(c6, vout6x01234567_7x01234567, 0);
        vst1q_lane_s8(c7, vout6x01234567_7x01234567, 8);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 20,244
| 53.568733
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-1x16-minmax-fp32-neon-mlal-lane.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/neon-mlal-lane.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/igemm.h>
// QS8 indirect-GEMM (IGEMM) microkernel, 1 row x 16 output channels, for NEON.
// Accumulates int8 dot products via widening multiply-accumulate
// (vmlal_lane_s16) over 8-deep unrolled K steps, then requantizes with the
// fp32 "magic bias" trick (no NEONv8 rounding-convert required).
//
// Fix applied in review: the five parameter loads below had "&params" mangled
// into "¶ms" (an HTML-entity decoding artifact, &para; -> ¶), which does
// not compile. Restored to "&params". All other tokens are unchanged from the
// generated source (template: src/qs8-igemm/neon-mlal-lane.c.in).
void xnn_qs8_igemm_minmax_fp32_ukernel_1x16__neon_mlal_lane(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (1 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  int8_t* c0 = c;
  do {
    // Accumulators start from the per-channel bias packed at the head of w.
    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
    size_t p = ks;
    do {
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      a += 1;
      size_t k = kc;
      // Main loop: 8 K elements per iteration, one lane per element.
      while (k >= 8 * sizeof(int8_t)) {
        const int8x8_t va0 = vld1_s8(a0); a0 += 8;
        const int16x8_t vxa0 = vmovl_s8(va0);
        const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);
        vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
        vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
        const int8x8_t vb89ABCDEFc0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb89ABCDEFc0 = vmovl_s8(vb89ABCDEFc0);
        vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
        vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
        const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);
        vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
        vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
        const int8x8_t vb89ABCDEFc1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb89ABCDEFc1 = vmovl_s8(vb89ABCDEFc1);
        vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
        vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
        const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);
        vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
        vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
        const int8x8_t vb89ABCDEFc2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb89ABCDEFc2 = vmovl_s8(vb89ABCDEFc2);
        vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
        vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
        const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);
        vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
        vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
        const int8x8_t vb89ABCDEFc3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb89ABCDEFc3 = vmovl_s8(vb89ABCDEFc3);
        vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
        vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
        const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);
        vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
        vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
        const int8x8_t vb89ABCDEFc4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb89ABCDEFc4 = vmovl_s8(vb89ABCDEFc4);
        vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
        vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
        const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);
        vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
        vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
        const int8x8_t vb89ABCDEFc5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb89ABCDEFc5 = vmovl_s8(vb89ABCDEFc5);
        vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
        vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
        const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);
        vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
        vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
        const int8x8_t vb89ABCDEFc6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb89ABCDEFc6 = vmovl_s8(vb89ABCDEFc6);
        vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
        vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
        const int8x8_t vb01234567c7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb01234567c7 = vmovl_s8(vb01234567c7);
        vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
        vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
        const int8x8_t vb89ABCDEFc7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb89ABCDEFc7 = vmovl_s8(vb89ABCDEFc7);
        vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc7), vget_high_s16(vxa0), 3);
        vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc7), vget_high_s16(vxa0), 3);
        k -= 8 * sizeof(int8_t);
      }
      // Remainder: 1..7 leftover K elements, handled one lane at a time.
      if XNN_UNLIKELY(k != 0) {
        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
        const int16x8_t vxa0 = vmovl_s8(va0);
        const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);
        const int8x8_t vb89ABCDEFc0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb89ABCDEFc0 = vmovl_s8(vb89ABCDEFc0);
        vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
        vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
        vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
        vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
        if (k >= 2 * sizeof(int8_t)) {
          const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
          const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);
          const int8x8_t vb89ABCDEFc1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
          const int16x8_t vxb89ABCDEFc1 = vmovl_s8(vb89ABCDEFc1);
          vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
          vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
          vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
          vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
          if (k > 2 * sizeof(int8_t)) {
            const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
            const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);
            const int8x8_t vb89ABCDEFc2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
            const int16x8_t vxb89ABCDEFc2 = vmovl_s8(vb89ABCDEFc2);
            vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
            vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
            vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
            vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
            if (k >= 4 * sizeof(int8_t)) {
              const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
              const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);
              const int8x8_t vb89ABCDEFc3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
              const int16x8_t vxb89ABCDEFc3 = vmovl_s8(vb89ABCDEFc3);
              vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
              vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
              vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
              vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
              if (k > 4 * sizeof(int8_t)) {
                const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
                const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);
                const int8x8_t vb89ABCDEFc4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
                const int16x8_t vxb89ABCDEFc4 = vmovl_s8(vb89ABCDEFc4);
                vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
                vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
                vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
                vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
                if (k >= 6 * sizeof(int8_t)) {
                  const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
                  const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);
                  const int8x8_t vb89ABCDEFc5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
                  const int16x8_t vxb89ABCDEFc5 = vmovl_s8(vb89ABCDEFc5);
                  vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
                  vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
                  vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
                  vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
                  if (k > 6 * sizeof(int8_t)) {
                    const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
                    const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);
                    const int8x8_t vb89ABCDEFc6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
                    const int16x8_t vxb89ABCDEFc6 = vmovl_s8(vb89ABCDEFc6);
                    vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
                    vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
                    vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
                    vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
                  }
                }
              }
            }
          }
        }
      }
      p -= 1 * sizeof(void*);
    } while (p != 0);
    // Requantization: int32 -> fp32, scale, add magic bias, then saturating
    // subtract of (magic bias - output zero point) yields a rounded int32.
    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
    float32x4_t vfpacc0x89AB = vcvtq_f32_s32(vacc0x89AB);
    float32x4_t vfpacc0xCDEF = vcvtq_f32_s32(vacc0xCDEF);
    const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
    vfpacc0x89AB = vmulq_f32(vfpacc0x89AB, vscale);
    vfpacc0xCDEF = vmulq_f32(vfpacc0xCDEF, vscale);
    const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias)
    ;
    vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
    vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
    vacc0x89AB = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x89AB, vmagic_bias));
    vacc0xCDEF = vreinterpretq_s32_f32(vaddq_f32(vfpacc0xCDEF, vmagic_bias));
    const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
    vacc0x0123 = vqsubq_s32(vacc0x0123, vmagic_bias_less_output_zero_point);
    vacc0x4567 = vqsubq_s32(vacc0x4567, vmagic_bias_less_output_zero_point);
    vacc0x89AB = vqsubq_s32(vacc0x89AB, vmagic_bias_less_output_zero_point);
    vacc0xCDEF = vqsubq_s32(vacc0xCDEF, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
    int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
    int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
    int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
    int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
    // Clamp to the caller-specified output range.
    const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neon.output_min);
    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
    const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neon.output_max);
    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
    if (nc >= 16) {
      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      a = (const int8_t**restrict) ((uintptr_t) a - ks);
      nc -= 16;
    } else {
      // Partial-tile store: write 8/4/2/1 lanes depending on remaining nc.
      int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
      if (nc & 8) {
        vst1_s8(c0, vout0x01234567); c0 += 8;
        vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
      }
      if (nc & 4) {
        vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
      }
      if (nc & 2) {
        vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
      }
      if (nc & 1) {
        vst1_lane_s8(c0, vout0x01234567, 0);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 16,722
| 52.599359
| 125
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-1x16-minmax-fp32-neonv8-mlal-lane.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/neon-mlal-lane.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/igemm.h>
#include <xnnpack/intrinsics-polyfill.h>
// QS8 indirect-GEMM (IGEMM) microkernel, 1 row x 16 output channels, NEONv8
// variant: identical accumulation to the plain NEON kernel, but requantizes
// with the ARMv8 round-to-nearest convert (vcvtnq_s32_f32) and adds the output
// zero point at the int16 stage instead of using the magic-bias trick.
//
// Fix applied in review: the four parameter loads below had "&params" mangled
// into "¶ms" (an HTML-entity decoding artifact, &para; -> ¶), which does
// not compile. Restored to "&params". All other tokens are unchanged from the
// generated source (template: src/qs8-igemm/neon-mlal-lane.c.in).
void xnn_qs8_igemm_minmax_fp32_ukernel_1x16__neonv8_mlal_lane(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (1 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  int8_t* c0 = c;
  do {
    // Accumulators start from the per-channel bias packed at the head of w.
    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
    size_t p = ks;
    do {
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      a += 1;
      size_t k = kc;
      // Main loop: 8 K elements per iteration, one lane per element.
      while (k >= 8 * sizeof(int8_t)) {
        const int8x8_t va0 = vld1_s8(a0); a0 += 8;
        const int16x8_t vxa0 = vmovl_s8(va0);
        const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);
        vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
        vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
        const int8x8_t vb89ABCDEFc0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb89ABCDEFc0 = vmovl_s8(vb89ABCDEFc0);
        vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
        vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
        const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);
        vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
        vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
        const int8x8_t vb89ABCDEFc1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb89ABCDEFc1 = vmovl_s8(vb89ABCDEFc1);
        vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
        vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
        const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);
        vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
        vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
        const int8x8_t vb89ABCDEFc2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb89ABCDEFc2 = vmovl_s8(vb89ABCDEFc2);
        vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
        vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
        const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);
        vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
        vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
        const int8x8_t vb89ABCDEFc3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb89ABCDEFc3 = vmovl_s8(vb89ABCDEFc3);
        vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
        vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
        const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);
        vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
        vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
        const int8x8_t vb89ABCDEFc4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb89ABCDEFc4 = vmovl_s8(vb89ABCDEFc4);
        vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
        vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
        const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);
        vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
        vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
        const int8x8_t vb89ABCDEFc5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb89ABCDEFc5 = vmovl_s8(vb89ABCDEFc5);
        vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
        vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
        const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);
        vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
        vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
        const int8x8_t vb89ABCDEFc6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb89ABCDEFc6 = vmovl_s8(vb89ABCDEFc6);
        vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
        vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
        const int8x8_t vb01234567c7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb01234567c7 = vmovl_s8(vb01234567c7);
        vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
        vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
        const int8x8_t vb89ABCDEFc7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb89ABCDEFc7 = vmovl_s8(vb89ABCDEFc7);
        vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc7), vget_high_s16(vxa0), 3);
        vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc7), vget_high_s16(vxa0), 3);
        k -= 8 * sizeof(int8_t);
      }
      // Remainder: 1..7 leftover K elements, handled one lane at a time.
      if XNN_UNLIKELY(k != 0) {
        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
        const int16x8_t vxa0 = vmovl_s8(va0);
        const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);
        const int8x8_t vb89ABCDEFc0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb89ABCDEFc0 = vmovl_s8(vb89ABCDEFc0);
        vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
        vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
        vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
        vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
        if (k >= 2 * sizeof(int8_t)) {
          const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
          const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);
          const int8x8_t vb89ABCDEFc1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
          const int16x8_t vxb89ABCDEFc1 = vmovl_s8(vb89ABCDEFc1);
          vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
          vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
          vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
          vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
          if (k > 2 * sizeof(int8_t)) {
            const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
            const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);
            const int8x8_t vb89ABCDEFc2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
            const int16x8_t vxb89ABCDEFc2 = vmovl_s8(vb89ABCDEFc2);
            vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
            vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
            vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
            vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
            if (k >= 4 * sizeof(int8_t)) {
              const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
              const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);
              const int8x8_t vb89ABCDEFc3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
              const int16x8_t vxb89ABCDEFc3 = vmovl_s8(vb89ABCDEFc3);
              vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
              vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
              vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
              vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
              if (k > 4 * sizeof(int8_t)) {
                const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
                const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);
                const int8x8_t vb89ABCDEFc4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
                const int16x8_t vxb89ABCDEFc4 = vmovl_s8(vb89ABCDEFc4);
                vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
                vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
                vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
                vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
                if (k >= 6 * sizeof(int8_t)) {
                  const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
                  const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);
                  const int8x8_t vb89ABCDEFc5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
                  const int16x8_t vxb89ABCDEFc5 = vmovl_s8(vb89ABCDEFc5);
                  vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
                  vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
                  vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
                  vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
                  if (k > 6 * sizeof(int8_t)) {
                    const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
                    const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);
                    const int8x8_t vb89ABCDEFc6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
                    const int16x8_t vxb89ABCDEFc6 = vmovl_s8(vb89ABCDEFc6);
                    vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
                    vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
                    vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
                    vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
                  }
                }
              }
            }
          }
        }
      }
      p -= 1 * sizeof(void*);
    } while (p != 0);
    // Requantization: int32 -> fp32, scale, ARMv8 round-to-nearest back to
    // int32, then add the output zero point during the int16 narrowing.
    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
    float32x4_t vfpacc0x89AB = vcvtq_f32_s32(vacc0x89AB);
    float32x4_t vfpacc0xCDEF = vcvtq_f32_s32(vacc0xCDEF);
    const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
    vfpacc0x89AB = vmulq_f32(vfpacc0x89AB, vscale);
    vfpacc0xCDEF = vmulq_f32(vfpacc0xCDEF, vscale);
    vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
    vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
    vacc0x89AB = vcvtnq_s32_f32(vfpacc0x89AB);
    vacc0xCDEF = vcvtnq_s32_f32(vfpacc0xCDEF);
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
#if XNN_ARCH_ARM64
    int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
    int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
    int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
    int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
    // Clamp to the caller-specified output range.
    const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neonv8.output_min);
    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
    const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neonv8.output_max);
    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
    if (nc >= 16) {
      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      a = (const int8_t**restrict) ((uintptr_t) a - ks);
      nc -= 16;
    } else {
      // Partial-tile store: write 8/4/2/1 lanes depending on remaining nc.
      int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
      if (nc & 8) {
        vst1_s8(c0, vout0x01234567); c0 += 8;
        vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
      }
      if (nc & 4) {
        vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
      }
      if (nc & 2) {
        vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
      }
      if (nc & 1) {
        vst1_lane_s8(c0, vout0x01234567, 0);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 16,502
| 52.064309
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-1x16-minmax-rndnu-neon-mlal-lane-prfm.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/neon-mlal-lane.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/igemm.h>
#include <xnnpack/prefetch.h>
void xnn_qs8_igemm_minmax_rndnu_ukernel_1x16__neon_mlal_lane_prfm(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const int8x8_t va0 = vld1_s8(a0); a0 += 8;
const int16x8_t vxa0 = vmovl_s8(va0);
const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
const int8x8_t vb89ABCDEFc0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc0 = vmovl_s8(vb89ABCDEFc0);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
const int8x8_t vb89ABCDEFc1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc1 = vmovl_s8(vb89ABCDEFc1);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
const int8x8_t vb89ABCDEFc2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc2 = vmovl_s8(vb89ABCDEFc2);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
const int8x8_t vb89ABCDEFc3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc3 = vmovl_s8(vb89ABCDEFc3);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
xnn_prefetch_to_l1((const int8_t*) w + 448);
xnn_prefetch_to_l1((const int8_t*) w + 512);
const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
const int8x8_t vb89ABCDEFc4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc4 = vmovl_s8(vb89ABCDEFc4);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
const int8x8_t vb89ABCDEFc5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc5 = vmovl_s8(vb89ABCDEFc5);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
const int8x8_t vb89ABCDEFc6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc6 = vmovl_s8(vb89ABCDEFc6);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
const int8x8_t vb01234567c7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c7 = vmovl_s8(vb01234567c7);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
const int8x8_t vb89ABCDEFc7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc7 = vmovl_s8(vb89ABCDEFc7);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc7), vget_high_s16(vxa0), 3);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc7), vget_high_s16(vxa0), 3);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int16x8_t vxa0 = vmovl_s8(va0);
const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);
const int8x8_t vb89ABCDEFc0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc0 = vmovl_s8(vb89ABCDEFc0);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
if (k >= 2 * sizeof(int8_t)) {
const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);
const int8x8_t vb89ABCDEFc1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc1 = vmovl_s8(vb89ABCDEFc1);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);
const int8x8_t vb89ABCDEFc2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc2 = vmovl_s8(vb89ABCDEFc2);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
if (k >= 4 * sizeof(int8_t)) {
const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);
const int8x8_t vb89ABCDEFc3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc3 = vmovl_s8(vb89ABCDEFc3);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);
const int8x8_t vb89ABCDEFc4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc4 = vmovl_s8(vb89ABCDEFc4);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
if (k >= 6 * sizeof(int8_t)) {
const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);
const int8x8_t vb89ABCDEFc5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc5 = vmovl_s8(vb89ABCDEFc5);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
if (k > 6 * sizeof(int8_t)) {
const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);
const int8x8_t vb89ABCDEFc6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc6 = vmovl_s8(vb89ABCDEFc6);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
}
}
}
}
}
}
}
p -= 1 * sizeof(void*);
} while (p != 0);
// Post-accumulation work
const int32x4_t vright_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->rndnu_neon.output_min);
vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->rndnu_neon.output_max);
vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
if (nc >= 16) {
vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 16;
} else {
int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
if (nc & 8) {
vst1_s8(c0, vout0x01234567); c0 += 8;
vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
}
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 16,862
| 52.363924
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-1x16-minmax-rndnu-neon-mlal-lane.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/neon-mlal-lane.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/igemm.h>
void xnn_qs8_igemm_minmax_rndnu_ukernel_1x16__neon_mlal_lane(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const int8x8_t va0 = vld1_s8(a0); a0 += 8;
const int16x8_t vxa0 = vmovl_s8(va0);
const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
const int8x8_t vb89ABCDEFc0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc0 = vmovl_s8(vb89ABCDEFc0);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
const int8x8_t vb89ABCDEFc1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc1 = vmovl_s8(vb89ABCDEFc1);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
const int8x8_t vb89ABCDEFc2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc2 = vmovl_s8(vb89ABCDEFc2);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
const int8x8_t vb89ABCDEFc3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc3 = vmovl_s8(vb89ABCDEFc3);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
const int8x8_t vb89ABCDEFc4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc4 = vmovl_s8(vb89ABCDEFc4);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
const int8x8_t vb89ABCDEFc5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc5 = vmovl_s8(vb89ABCDEFc5);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
const int8x8_t vb89ABCDEFc6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc6 = vmovl_s8(vb89ABCDEFc6);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
const int8x8_t vb01234567c7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c7 = vmovl_s8(vb01234567c7);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
const int8x8_t vb89ABCDEFc7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc7 = vmovl_s8(vb89ABCDEFc7);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc7), vget_high_s16(vxa0), 3);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc7), vget_high_s16(vxa0), 3);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int16x8_t vxa0 = vmovl_s8(va0);
const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);
const int8x8_t vb89ABCDEFc0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc0 = vmovl_s8(vb89ABCDEFc0);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
if (k >= 2 * sizeof(int8_t)) {
const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);
const int8x8_t vb89ABCDEFc1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc1 = vmovl_s8(vb89ABCDEFc1);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);
const int8x8_t vb89ABCDEFc2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc2 = vmovl_s8(vb89ABCDEFc2);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
if (k >= 4 * sizeof(int8_t)) {
const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);
const int8x8_t vb89ABCDEFc3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc3 = vmovl_s8(vb89ABCDEFc3);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);
const int8x8_t vb89ABCDEFc4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc4 = vmovl_s8(vb89ABCDEFc4);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
if (k >= 6 * sizeof(int8_t)) {
const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);
const int8x8_t vb89ABCDEFc5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc5 = vmovl_s8(vb89ABCDEFc5);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
if (k > 6 * sizeof(int8_t)) {
const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);
const int8x8_t vb89ABCDEFc6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc6 = vmovl_s8(vb89ABCDEFc6);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
}
}
}
}
}
}
}
p -= 1 * sizeof(void*);
} while (p != 0);
// Post-accumulation work
const int32x4_t vright_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->rndnu_neon.output_min);
vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->rndnu_neon.output_max);
vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
if (nc >= 16) {
vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 16;
} else {
int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
if (nc & 8) {
vst1_s8(c0, vout0x01234567); c0 += 8;
vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
}
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 16,721
| 52.42492
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-1x16-minmax-rndnu-neon-mull-addw-dup.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/neon-mull-addw-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/gemm.h>
void xnn_qs8_igemm_minmax_rndnu_ukernel_1x16__neon_mull_addw_dup(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const int8x8_t va0 = vld1_s8(a0); a0 += 8;
const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x01234567c0 = vmull_s8(vb01234567c0, vdup_lane_s8(va0, 0));
vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c0));
vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c0));
const int8x8_t vb89ABCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x89ABCDEFc0 = vmull_s8(vb89ABCDEFc0, vdup_lane_s8(va0, 0));
vacc0x89AB = vaddw_s16(vacc0x89AB, vget_low_s16(vprod0x89ABCDEFc0));
vacc0xCDEF = vaddw_s16(vacc0xCDEF, vget_high_s16(vprod0x89ABCDEFc0));
const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x01234567c1 = vmull_s8(vb01234567c1, vdup_lane_s8(va0, 1));
vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c1));
vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c1));
const int8x8_t vb89ABCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x89ABCDEFc1 = vmull_s8(vb89ABCDEFc1, vdup_lane_s8(va0, 1));
vacc0x89AB = vaddw_s16(vacc0x89AB, vget_low_s16(vprod0x89ABCDEFc1));
vacc0xCDEF = vaddw_s16(vacc0xCDEF, vget_high_s16(vprod0x89ABCDEFc1));
const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x01234567c2 = vmull_s8(vb01234567c2, vdup_lane_s8(va0, 2));
vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c2));
vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c2));
const int8x8_t vb89ABCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x89ABCDEFc2 = vmull_s8(vb89ABCDEFc2, vdup_lane_s8(va0, 2));
vacc0x89AB = vaddw_s16(vacc0x89AB, vget_low_s16(vprod0x89ABCDEFc2));
vacc0xCDEF = vaddw_s16(vacc0xCDEF, vget_high_s16(vprod0x89ABCDEFc2));
const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x01234567c3 = vmull_s8(vb01234567c3, vdup_lane_s8(va0, 3));
vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c3));
vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c3));
const int8x8_t vb89ABCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x89ABCDEFc3 = vmull_s8(vb89ABCDEFc3, vdup_lane_s8(va0, 3));
vacc0x89AB = vaddw_s16(vacc0x89AB, vget_low_s16(vprod0x89ABCDEFc3));
vacc0xCDEF = vaddw_s16(vacc0xCDEF, vget_high_s16(vprod0x89ABCDEFc3));
const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x01234567c4 = vmull_s8(vb01234567c4, vdup_lane_s8(va0, 4));
vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c4));
vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c4));
const int8x8_t vb89ABCDEFc4 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x89ABCDEFc4 = vmull_s8(vb89ABCDEFc4, vdup_lane_s8(va0, 4));
vacc0x89AB = vaddw_s16(vacc0x89AB, vget_low_s16(vprod0x89ABCDEFc4));
vacc0xCDEF = vaddw_s16(vacc0xCDEF, vget_high_s16(vprod0x89ABCDEFc4));
const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x01234567c5 = vmull_s8(vb01234567c5, vdup_lane_s8(va0, 5));
vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c5));
vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c5));
const int8x8_t vb89ABCDEFc5 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x89ABCDEFc5 = vmull_s8(vb89ABCDEFc5, vdup_lane_s8(va0, 5));
vacc0x89AB = vaddw_s16(vacc0x89AB, vget_low_s16(vprod0x89ABCDEFc5));
vacc0xCDEF = vaddw_s16(vacc0xCDEF, vget_high_s16(vprod0x89ABCDEFc5));
const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x01234567c6 = vmull_s8(vb01234567c6, vdup_lane_s8(va0, 6));
vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c6));
vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c6));
const int8x8_t vb89ABCDEFc6 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x89ABCDEFc6 = vmull_s8(vb89ABCDEFc6, vdup_lane_s8(va0, 6));
vacc0x89AB = vaddw_s16(vacc0x89AB, vget_low_s16(vprod0x89ABCDEFc6));
vacc0xCDEF = vaddw_s16(vacc0xCDEF, vget_high_s16(vprod0x89ABCDEFc6));
const int8x8_t vb01234567c7 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x01234567c7 = vmull_s8(vb01234567c7, vdup_lane_s8(va0, 7));
vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c7));
vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c7));
const int8x8_t vb89ABCDEFc7 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x89ABCDEFc7 = vmull_s8(vb89ABCDEFc7, vdup_lane_s8(va0, 7));
vacc0x89AB = vaddw_s16(vacc0x89AB, vget_low_s16(vprod0x89ABCDEFc7));
vacc0xCDEF = vaddw_s16(vacc0xCDEF, vget_high_s16(vprod0x89ABCDEFc7));
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x01234567c0 = vmull_s8(vb01234567c0, vdup_lane_s8(va0, 0));
vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c0));
vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c0));
const int16x8_t vprod0x89ABCDEFc0 = vmull_s8(vb89ABCDEFc0, vdup_lane_s8(va0, 0));
vacc0x89AB = vaddw_s16(vacc0x89AB, vget_low_s16(vprod0x89ABCDEFc0));
vacc0xCDEF = vaddw_s16(vacc0xCDEF, vget_high_s16(vprod0x89ABCDEFc0));
if (k >= 2 * sizeof(int8_t)) {
const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x01234567c1 = vmull_s8(vb01234567c1, vdup_lane_s8(va0, 1));
vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c1));
vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c1));
const int16x8_t vprod0x89ABCDEFc1 = vmull_s8(vb89ABCDEFc1, vdup_lane_s8(va0, 1));
vacc0x89AB = vaddw_s16(vacc0x89AB, vget_low_s16(vprod0x89ABCDEFc1));
vacc0xCDEF = vaddw_s16(vacc0xCDEF, vget_high_s16(vprod0x89ABCDEFc1));
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x01234567c2 = vmull_s8(vb01234567c2, vdup_lane_s8(va0, 2));
vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c2));
vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c2));
const int16x8_t vprod0x89ABCDEFc2 = vmull_s8(vb89ABCDEFc2, vdup_lane_s8(va0, 2));
vacc0x89AB = vaddw_s16(vacc0x89AB, vget_low_s16(vprod0x89ABCDEFc2));
vacc0xCDEF = vaddw_s16(vacc0xCDEF, vget_high_s16(vprod0x89ABCDEFc2));
if (k >= 4 * sizeof(int8_t)) {
const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x01234567c3 = vmull_s8(vb01234567c3, vdup_lane_s8(va0, 3));
vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c3));
vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c3));
const int16x8_t vprod0x89ABCDEFc3 = vmull_s8(vb89ABCDEFc3, vdup_lane_s8(va0, 3));
vacc0x89AB = vaddw_s16(vacc0x89AB, vget_low_s16(vprod0x89ABCDEFc3));
vacc0xCDEF = vaddw_s16(vacc0xCDEF, vget_high_s16(vprod0x89ABCDEFc3));
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABCDEFc4 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x01234567c4 = vmull_s8(vb01234567c4, vdup_lane_s8(va0, 4));
vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c4));
vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c4));
const int16x8_t vprod0x89ABCDEFc4 = vmull_s8(vb89ABCDEFc4, vdup_lane_s8(va0, 4));
vacc0x89AB = vaddw_s16(vacc0x89AB, vget_low_s16(vprod0x89ABCDEFc4));
vacc0xCDEF = vaddw_s16(vacc0xCDEF, vget_high_s16(vprod0x89ABCDEFc4));
if (k >= 6 * sizeof(int8_t)) {
const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABCDEFc5 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x01234567c5 = vmull_s8(vb01234567c5, vdup_lane_s8(va0, 5));
vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c5));
vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c5));
const int16x8_t vprod0x89ABCDEFc5 = vmull_s8(vb89ABCDEFc5, vdup_lane_s8(va0, 5));
vacc0x89AB = vaddw_s16(vacc0x89AB, vget_low_s16(vprod0x89ABCDEFc5));
vacc0xCDEF = vaddw_s16(vacc0xCDEF, vget_high_s16(vprod0x89ABCDEFc5));
if (k > 6 * sizeof(int8_t)) {
const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABCDEFc6 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x01234567c6 = vmull_s8(vb01234567c6, vdup_lane_s8(va0, 6));
vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c6));
vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c6));
const int16x8_t vprod0x89ABCDEFc6 = vmull_s8(vb89ABCDEFc6, vdup_lane_s8(va0, 6));
vacc0x89AB = vaddw_s16(vacc0x89AB, vget_low_s16(vprod0x89ABCDEFc6));
vacc0xCDEF = vaddw_s16(vacc0xCDEF, vget_high_s16(vprod0x89ABCDEFc6));
}
}
}
}
}
}
}
p -= 1 * sizeof(void*);
} while (p != 0);
const int32x4_t vright_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->rndnu_neon.output_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->rndnu_neon.output_max);
vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
if (nc >= 16) {
vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 16;
} else {
int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
if (nc & 8) {
vst1_s8(c0, vout0x01234567); c0 += 8;
vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
}
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 16,232
| 52.751656
| 130
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-1x16c16-minmax-rndnu-neon-mlal.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/c16-neon-mlal.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
// QS8 IGEMM (indirect GEMM) microkernel: MR=1 output row by NR=16 output
// channels, with KC unrolled by 16 bytes ("c16").  Products are formed with
// NEON VMULL/VMLAL on int8 lanes, pairwise-accumulated into per-channel
// int32 vectors, and requantized with the "rndnu" scheme (saturating
// pre-shift, doubling-high multiply, rounding post-shift).
//
// Parameters (XNNPACK IGEMM contract):
//   mr        - number of output rows to compute; must be 1 for this kernel
//   nc        - number of output channels (columns) remaining
//   kc        - K elements per input row; padded here to a multiple of 16
//   ks        - bytes of the indirection buffer consumed per output pixel
//               (ks / sizeof(void*) input-row pointers)
//   a         - indirection buffer: array of pointers to input rows
//   w         - packed weights: 16 int32 biases followed by interleaved
//               16-byte int8 weight vectors, one per output channel per step
//   c         - output pointer
//   cm_stride - stride between output rows (unused beyond row 0 since MR=1)
//   cn_stride - stride between successive groups of 16 output channels
//   a_offset  - byte offset applied to every input-row pointer except `zero`
//   zero      - pointer to the shared zero buffer; rows equal to it are
//               padding and must not receive a_offset
//   params    - rndnu requantization constants (shifts, multiplier, output
//               zero point, and output min/max clamps)
void xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c16__neon_mlal(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  // Contract checks: exactly one output row, non-empty output/reduction,
  // and a whole number of indirection pointers.
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (1 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // The packed weights pad KC to the 16-byte unroll; XNN_OOB_READS above
  // documents that reads may run past the logical end of the input row.
  kc = round_up_po2(kc, 16 * sizeof(int8_t));
  int8_t* c0 = c;
  do {
    // Load the 16 per-channel int32 biases into lane 0 of 16 accumulator
    // vectors (other lanes start at 0 and are folded in by the pairwise
    // reduction after the K loop).
    int32x4_t vacc0x0 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x1 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x2 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x3 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x4 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x5 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x6 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x7 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x8 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x9 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x10 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x11 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x12 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x13 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x14 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x15 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    // Walk the indirection buffer: one input-row pointer per iteration.
    size_t p = ks;
    do {
      const int8_t* restrict a0 = a[0];
      // Padding rows point at the shared zero buffer and must not be offset.
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      a += 1;
      // Main K loop: 16 input bytes per iteration.  kc was rounded up to a
      // multiple of 16 above, so there is no remainder path here.
      size_t k = kc;
      while (k != 0) {
        // One 16-byte slice of the input row, and one 16-byte weight vector
        // for each of the 16 output channels.
        const int8x16_t va0 = vld1q_s8(a0); a0 += 16;
        const int8x16_t vb0 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
        const int8x16_t vb1 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
        const int8x16_t vb2 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
        const int8x16_t vb3 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
        const int8x16_t vb4 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
        const int8x16_t vb5 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
        const int8x16_t vb6 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
        const int8x16_t vb7 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
        const int8x16_t vb8 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
        const int8x16_t vb9 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
        const int8x16_t vb10 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
        const int8x16_t vb11 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
        const int8x16_t vb12 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
        const int8x16_t vb13 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
        const int8x16_t vb14 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
        const int8x16_t vb15 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
        // Per channel: widening multiply of the low 8 int8 lanes (VMULL),
        // multiply-accumulate of the high 8 lanes (VMLAL), then pairwise-
        // accumulate the eight int16 products into the channel's int32
        // accumulator (VPADAL).
        int16x8_t vprod0x0 = vmull_s8(vget_low_s8(vb0), vget_low_s8(va0));
        vprod0x0 = vmlal_s8(vprod0x0, vget_high_s8(vb0), vget_high_s8(va0));
        vacc0x0 = vpadalq_s16(vacc0x0, vprod0x0);
        int16x8_t vprod0x1 = vmull_s8(vget_low_s8(vb1), vget_low_s8(va0));
        vprod0x1 = vmlal_s8(vprod0x1, vget_high_s8(vb1), vget_high_s8(va0));
        vacc0x1 = vpadalq_s16(vacc0x1, vprod0x1);
        int16x8_t vprod0x2 = vmull_s8(vget_low_s8(vb2), vget_low_s8(va0));
        vprod0x2 = vmlal_s8(vprod0x2, vget_high_s8(vb2), vget_high_s8(va0));
        vacc0x2 = vpadalq_s16(vacc0x2, vprod0x2);
        int16x8_t vprod0x3 = vmull_s8(vget_low_s8(vb3), vget_low_s8(va0));
        vprod0x3 = vmlal_s8(vprod0x3, vget_high_s8(vb3), vget_high_s8(va0));
        vacc0x3 = vpadalq_s16(vacc0x3, vprod0x3);
        int16x8_t vprod0x4 = vmull_s8(vget_low_s8(vb4), vget_low_s8(va0));
        vprod0x4 = vmlal_s8(vprod0x4, vget_high_s8(vb4), vget_high_s8(va0));
        vacc0x4 = vpadalq_s16(vacc0x4, vprod0x4);
        int16x8_t vprod0x5 = vmull_s8(vget_low_s8(vb5), vget_low_s8(va0));
        vprod0x5 = vmlal_s8(vprod0x5, vget_high_s8(vb5), vget_high_s8(va0));
        vacc0x5 = vpadalq_s16(vacc0x5, vprod0x5);
        int16x8_t vprod0x6 = vmull_s8(vget_low_s8(vb6), vget_low_s8(va0));
        vprod0x6 = vmlal_s8(vprod0x6, vget_high_s8(vb6), vget_high_s8(va0));
        vacc0x6 = vpadalq_s16(vacc0x6, vprod0x6);
        int16x8_t vprod0x7 = vmull_s8(vget_low_s8(vb7), vget_low_s8(va0));
        vprod0x7 = vmlal_s8(vprod0x7, vget_high_s8(vb7), vget_high_s8(va0));
        vacc0x7 = vpadalq_s16(vacc0x7, vprod0x7);
        int16x8_t vprod0x8 = vmull_s8(vget_low_s8(vb8), vget_low_s8(va0));
        vprod0x8 = vmlal_s8(vprod0x8, vget_high_s8(vb8), vget_high_s8(va0));
        vacc0x8 = vpadalq_s16(vacc0x8, vprod0x8);
        int16x8_t vprod0x9 = vmull_s8(vget_low_s8(vb9), vget_low_s8(va0));
        vprod0x9 = vmlal_s8(vprod0x9, vget_high_s8(vb9), vget_high_s8(va0));
        vacc0x9 = vpadalq_s16(vacc0x9, vprod0x9);
        int16x8_t vprod0x10 = vmull_s8(vget_low_s8(vb10), vget_low_s8(va0));
        vprod0x10 = vmlal_s8(vprod0x10, vget_high_s8(vb10), vget_high_s8(va0));
        vacc0x10 = vpadalq_s16(vacc0x10, vprod0x10);
        int16x8_t vprod0x11 = vmull_s8(vget_low_s8(vb11), vget_low_s8(va0));
        vprod0x11 = vmlal_s8(vprod0x11, vget_high_s8(vb11), vget_high_s8(va0));
        vacc0x11 = vpadalq_s16(vacc0x11, vprod0x11);
        int16x8_t vprod0x12 = vmull_s8(vget_low_s8(vb12), vget_low_s8(va0));
        vprod0x12 = vmlal_s8(vprod0x12, vget_high_s8(vb12), vget_high_s8(va0));
        vacc0x12 = vpadalq_s16(vacc0x12, vprod0x12);
        int16x8_t vprod0x13 = vmull_s8(vget_low_s8(vb13), vget_low_s8(va0));
        vprod0x13 = vmlal_s8(vprod0x13, vget_high_s8(vb13), vget_high_s8(va0));
        vacc0x13 = vpadalq_s16(vacc0x13, vprod0x13);
        int16x8_t vprod0x14 = vmull_s8(vget_low_s8(vb14), vget_low_s8(va0));
        vprod0x14 = vmlal_s8(vprod0x14, vget_high_s8(vb14), vget_high_s8(va0));
        vacc0x14 = vpadalq_s16(vacc0x14, vprod0x14);
        int16x8_t vprod0x15 = vmull_s8(vget_low_s8(vb15), vget_low_s8(va0));
        vprod0x15 = vmlal_s8(vprod0x15, vget_high_s8(vb15), vget_high_s8(va0));
        vacc0x15 = vpadalq_s16(vacc0x15, vprod0x15);
        k -= 16 * sizeof(int8_t);
      }
      p -= 1 * sizeof(void*);
    } while (p != 0);
    // Reduce the sixteen 4-lane accumulators down to four vectors holding
    // one int32 sum per output channel (the bias was carried in lane 0).
#if XNN_ARCH_ARM64
    const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1);
    const int32x4_t vsum0x23 = vpaddq_s32(vacc0x2, vacc0x3);
    const int32x4_t vsum0x45 = vpaddq_s32(vacc0x4, vacc0x5);
    const int32x4_t vsum0x67 = vpaddq_s32(vacc0x6, vacc0x7);
    const int32x4_t vsum0x89 = vpaddq_s32(vacc0x8, vacc0x9);
    const int32x4_t vsum0xAB = vpaddq_s32(vacc0x10, vacc0x11);
    const int32x4_t vsum0xCD = vpaddq_s32(vacc0x12, vacc0x13);
    const int32x4_t vsum0xEF = vpaddq_s32(vacc0x14, vacc0x15);
    int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23);
    int32x4_t vacc0x4567 = vpaddq_s32(vsum0x45, vsum0x67);
    int32x4_t vacc0x89AB = vpaddq_s32(vsum0x89, vsum0xAB);
    int32x4_t vacc0xCDEF = vpaddq_s32(vsum0xCD, vsum0xEF);
#else
    // AArch32 has no VPADDQ; emulate with half-vector adds and VPADD.
    const int32x2_t vpsum0x0 = vadd_s32(vget_low_s32(vacc0x0), vget_high_s32(vacc0x0));
    const int32x2_t vpsum0x1 = vadd_s32(vget_low_s32(vacc0x1), vget_high_s32(vacc0x1));
    const int32x2_t vpsum0x2 = vadd_s32(vget_low_s32(vacc0x2), vget_high_s32(vacc0x2));
    const int32x2_t vpsum0x3 = vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3));
    const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1);
    const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3);
    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23 );
    const int32x2_t vpsum0x4 = vadd_s32(vget_low_s32(vacc0x4), vget_high_s32(vacc0x4));
    const int32x2_t vpsum0x5 = vadd_s32(vget_low_s32(vacc0x5), vget_high_s32(vacc0x5));
    const int32x2_t vpsum0x6 = vadd_s32(vget_low_s32(vacc0x6), vget_high_s32(vacc0x6));
    const int32x2_t vpsum0x7 = vadd_s32(vget_low_s32(vacc0x7), vget_high_s32(vacc0x7));
    const int32x2_t vsum0x45 = vpadd_s32(vpsum0x4, vpsum0x5);
    const int32x2_t vsum0x67 = vpadd_s32(vpsum0x6, vpsum0x7);
    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67 );
    const int32x2_t vpsum0x8 = vadd_s32(vget_low_s32(vacc0x8), vget_high_s32(vacc0x8));
    const int32x2_t vpsum0x9 = vadd_s32(vget_low_s32(vacc0x9), vget_high_s32(vacc0x9));
    const int32x2_t vpsum0xA = vadd_s32(vget_low_s32(vacc0x10), vget_high_s32(vacc0x10));
    const int32x2_t vpsum0xB = vadd_s32(vget_low_s32(vacc0x11), vget_high_s32(vacc0x11));
    const int32x2_t vsum0x89 = vpadd_s32(vpsum0x8, vpsum0x9);
    const int32x2_t vsum0xAB = vpadd_s32(vpsum0xA, vpsum0xB);
    int32x4_t vacc0x89AB = vcombine_s32(vsum0x89, vsum0xAB );
    const int32x2_t vpsum0xC = vadd_s32(vget_low_s32(vacc0x12), vget_high_s32(vacc0x12));
    const int32x2_t vpsum0xD = vadd_s32(vget_low_s32(vacc0x13), vget_high_s32(vacc0x13));
    const int32x2_t vpsum0xE = vadd_s32(vget_low_s32(vacc0x14), vget_high_s32(vacc0x14));
    const int32x2_t vpsum0xF = vadd_s32(vget_low_s32(vacc0x15), vget_high_s32(vacc0x15));
    const int32x2_t vsum0xCD = vpadd_s32(vpsum0xC, vpsum0xD);
    const int32x2_t vsum0xEF = vpadd_s32(vpsum0xE, vpsum0xF);
    int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF );
#endif
    // rndnu requantization: saturating left pre-shift, saturating doubling
    // high multiply, then rounding right post-shift (shifts are encoded as
    // left-shift amounts, so the post-shift value is negative).
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
    vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);
    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
    // Narrow int32 -> int16 with saturation, add the output zero point,
    // then narrow int16 -> int8 with saturation.
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
    // Clamp the quantized output to the caller-specified [min, max] range.
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
    if (nc >= 16) {
      // Full 16-channel store; rewind the indirection buffer by ks bytes so
      // the same input-row pointers are reused for the next channel group.
      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      a = (const int8_t**restrict) ((uintptr_t) a - ks);
      nc -= 16;
    } else {
      // Partial store of the final 1-15 channels: emit progressively
      // smaller pieces (8/4/2/1 bytes) based on the bits of nc.
      int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
      if (nc & 8) {
        vst1_s8(c0, vout0x01234567); c0 += 8;
        vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
      }
      if (nc & 4) {
        vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
      }
      if (nc & 2) {
        vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
      }
      if (nc & 1) {
        vst1_lane_s8(c0, vout0x01234567, 0);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 14,195
| 53.6
| 130
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-1x16c2-minmax-rndnu-neon-mlal-dup.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/c2-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_dup(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
while (k >= 16 * sizeof(int8_t)) {
const int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
const int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 0));
const int8x8_t va0c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 0));
int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0x0, va0c0x0);
const int8x8_t vb89ABc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x89ABc0 = vmlal_s8(vprod0x89ABc0, vb89ABc0x1, va0c0x1);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0x0, va0c0x0);
const int8x8_t vbCDEFc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xCDEFc0 = vmlal_s8(vprod0xCDEFc0, vbCDEFc0x1, va0c0x1);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
const int8x8_t va0c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 1));
const int8x8_t va0c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 1));
int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1x0, va0c1x0);
const int8x8_t vb89ABc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x89ABc1 = vmlal_s8(vprod0x89ABc1, vb89ABc1x1, va0c1x1);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1x0, va0c1x0);
const int8x8_t vbCDEFc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xCDEFc1 = vmlal_s8(vprod0xCDEFc1, vbCDEFc1x1, va0c1x1);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
const int8x8_t va0c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 2));
const int8x8_t va0c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 2));
int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2x0, va0c2x0);
const int8x8_t vb89ABc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x89ABc2 = vmlal_s8(vprod0x89ABc2, vb89ABc2x1, va0c2x1);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2x0, va0c2x0);
const int8x8_t vbCDEFc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xCDEFc2 = vmlal_s8(vprod0xCDEFc2, vbCDEFc2x1, va0c2x1);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
const int8x8_t va0c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 3));
const int8x8_t va0c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 3));
int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3x0, va0c3x0);
const int8x8_t vb89ABc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x89ABc3 = vmlal_s8(vprod0x89ABc3, vb89ABc3x1, va0c3x1);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3x0, va0c3x0);
const int8x8_t vbCDEFc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xCDEFc3 = vmlal_s8(vprod0xCDEFc3, vbCDEFc3x1, va0c3x1);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
k -= 16 * sizeof(int8_t);
}
if (k >= 8 * sizeof(int8_t)) {
const int8x8_t va0 = vld1_s8(a0); a0 += 8;
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
const int8x8_t va0c3 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 3));
const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
}
}
}
p -= 1 * sizeof(void*);
} while (p != 0);
const int32x4_t vright_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->rndnu_neon.output_min);
vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->rndnu_neon.output_max);
vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
if (nc >= 16) {
vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 16;
} else {
int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
if (nc & 8) {
vst1_s8(c0, vout0x01234567); c0 += 8;
vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
}
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 19,909
| 55.084507
| 107
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-1x16c2-minmax-rndnu-neon-mlal-ld1r.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/c2-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
// QS8 indirect-GEMM (IGEMM) microkernel: MR=1 row, NR=16 output channels,
// KR=2 int8 elements per channel step, NEON VMULL/VMLAL with 2x K-unrolling,
// "ld1r"-style duplicated 16-bit loads of the activation stream, and rndnu
// (pre-shift -> rounding doubling high multiply -> post-shift) requantization.
//
// NOTE(review): extraction of this chunk had mangled `&params` into the
// mojibake `¶ms` (HTML entity `&para;`); restored below so the block compiles.
void xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (1 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // KR=2: round the reduction dimension up to a multiple of 2 bytes.
  kc = round_up_po2(kc, 2 * sizeof(int8_t));
  int8_t* c0 = c;

  do {
    // Initialize the 16 int32 accumulators from the packed bias at `w`.
    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));

    size_t p = ks;
    do {
      // Indirection: fetch the next input-row pointer; `zero` rows skip the offset.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      a += 1;

      size_t k = kc;
      // Main loop: 16 bytes of activations per iteration (2x unrolled MLAL).
      while (k >= 16 * sizeof(int8_t)) {
        const int16x4_t va00x0 = vld1_dup_s16((const void*)a0);
        const int16x4_t va01x0 = vld1_dup_s16((const void*)(a0 + 2));
        const int16x4_t va02x0 = vld1_dup_s16((const void*)(a0 + 4));
        const int16x4_t va03x0 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
        const int16x4_t va00x1 = vld1_dup_s16((const void*)a0);
        const int16x4_t va01x1 = vld1_dup_s16((const void*)(a0 + 2));
        const int16x4_t va02x1 = vld1_dup_s16((const void*)(a0 + 4));
        const int16x4_t va03x1 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;

        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89ABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89ABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89ABc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDEFc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89ABc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDEFc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

        // c0: widening multiply of the first unroll, fused multiply-accumulate
        // of the second unroll, then pairwise add-accumulate into int32.
        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0);
        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1);
        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
        int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0x0, va0c0x0);
        const int8x8_t vb89ABc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x89ABc0 = vmlal_s8(vprod0x89ABc0, vb89ABc0x1, va0c0x1);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
        int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0x0, va0c0x0);
        const int8x8_t vbCDEFc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0xCDEFc0 = vmlal_s8(vprod0xCDEFc0, vbCDEFc0x1, va0c0x1);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va01x0);
        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va01x1);
        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
        int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1x0, va0c1x0);
        const int8x8_t vb89ABc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x89ABc1 = vmlal_s8(vprod0x89ABc1, vb89ABc1x1, va0c1x1);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
        int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1x0, va0c1x0);
        const int8x8_t vbCDEFc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0xCDEFc1 = vmlal_s8(vprod0xCDEFc1, vbCDEFc1x1, va0c1x1);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va02x0);
        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va02x1);
        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
        int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2x0, va0c2x0);
        const int8x8_t vb89ABc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x89ABc2 = vmlal_s8(vprod0x89ABc2, vb89ABc2x1, va0c2x1);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
        int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2x0, va0c2x0);
        const int8x8_t vbCDEFc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0xCDEFc2 = vmlal_s8(vprod0xCDEFc2, vbCDEFc2x1, va0c2x1);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va03x0);
        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va03x1);
        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
        int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3x0, va0c3x0);
        const int8x8_t vb89ABc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x89ABc3 = vmlal_s8(vprod0x89ABc3, vb89ABc3x1, va0c3x1);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
        int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3x0, va0c3x0);
        const int8x8_t vbCDEFc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0xCDEFc3 = vmlal_s8(vprod0xCDEFc3, vbCDEFc3x1, va0c3x1);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);

        k -= 16 * sizeof(int8_t);
      }
      // Single 8-byte tail iteration (no MLAL unrolling).
      if (k >= 8 * sizeof(int8_t)) {
        const int16x4_t va00 = vld1_dup_s16((const void*)a0);
        const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
        const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
        const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;

        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

        const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
        const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
        const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
        const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
        const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
        const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
        const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
        const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);

        k -= 8 * sizeof(int8_t);
      }
      // Remainder of 2/4/6 bytes: broadcast 16-bit lanes of one 8-byte load
      // (XNN_OOB_READS permits reading past the logical end of the row).
      if XNN_UNLIKELY(k != 0) {
        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);

        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);

        if (k > 2 * sizeof(int8_t)) {
          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
          const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
          const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
          const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
          const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);

          if (k > 4 * sizeof(int8_t)) {
            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
            const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
            const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
            const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
            vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
            const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
            vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
          }
        }
      }
      p -= 1 * sizeof(void*);
    } while (p != 0);

    // rndnu requantization: saturating pre-shift, rounding doubling high
    // multiply, rounding post-shift.  (Mojibake `¶ms` restored to `&params`.)
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
    vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);

    // Narrow int32 -> int16 -> int8 with saturation, adding the output zero point.
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
    int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);

    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
    int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
    int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);

    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif

    // Clamp to the requested output range.
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);

    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);

    if (nc >= 16) {
      // Full 16-channel store; rewind the indirection buffer for the next tile.
      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 16;
    } else {
      // Partial store: emit 8/4/2/1-byte pieces, rotating the vector between stores.
      int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
      if (nc & 8) {
        vst1_s8(c0, vout0x01234567); c0 += 8;
        vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
      }
      if (nc & 4) {
        vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
      }
      if (nc & 2) {
        vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
      }
      if (nc & 1) {
        vst1_lane_s8(c0, vout0x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
| 20,138
| 54.326923
| 107
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-1x16c2-minmax-rndnu-neon-mlal-ld2r.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/c2-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
while (k >= 16 * sizeof(int8_t)) {
const int16x4x2_t va00x0 = vld2_dup_s16((const void*)a0);
const int16x4x2_t va01x0 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
const int16x4x2_t va00x1 = vld2_dup_s16((const void*)a0);
const int16x4x2_t va01x1 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0.val[0]);
const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1.val[0]);
int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0x0, va0c0x0);
const int8x8_t vb89ABc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x89ABc0 = vmlal_s8(vprod0x89ABc0, vb89ABc0x1, va0c0x1);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0x0, va0c0x0);
const int8x8_t vbCDEFc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xCDEFc0 = vmlal_s8(vprod0xCDEFc0, vbCDEFc0x1, va0c0x1);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
const int8x8_t va0c1x0 = vreinterpret_s8_s16(va00x0.val[1]);
const int8x8_t va0c1x1 = vreinterpret_s8_s16(va00x1.val[1]);
int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1x0, va0c1x0);
const int8x8_t vb89ABc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x89ABc1 = vmlal_s8(vprod0x89ABc1, vb89ABc1x1, va0c1x1);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1x0, va0c1x0);
const int8x8_t vbCDEFc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xCDEFc1 = vmlal_s8(vprod0xCDEFc1, vbCDEFc1x1, va0c1x1);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
const int8x8_t va0c2x0 = vreinterpret_s8_s16(va01x0.val[0]);
const int8x8_t va0c2x1 = vreinterpret_s8_s16(va01x1.val[0]);
int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2x0, va0c2x0);
const int8x8_t vb89ABc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x89ABc2 = vmlal_s8(vprod0x89ABc2, vb89ABc2x1, va0c2x1);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2x0, va0c2x0);
const int8x8_t vbCDEFc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xCDEFc2 = vmlal_s8(vprod0xCDEFc2, vbCDEFc2x1, va0c2x1);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
const int8x8_t va0c3x0 = vreinterpret_s8_s16(va01x0.val[1]);
const int8x8_t va0c3x1 = vreinterpret_s8_s16(va01x1.val[1]);
int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3x0, va0c3x0);
const int8x8_t vb89ABc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x89ABc3 = vmlal_s8(vprod0x89ABc3, vb89ABc3x1, va0c3x1);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3x0, va0c3x0);
const int8x8_t vbCDEFc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xCDEFc3 = vmlal_s8(vprod0xCDEFc3, vbCDEFc3x1, va0c3x1);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
k -= 16 * sizeof(int8_t);
}
if (k >= 8 * sizeof(int8_t)) {
const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
}
}
}
p -= 1 * sizeof(void*);
} while (p != 0);
const int32x4_t vright_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->rndnu_neon.output_min);
vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->rndnu_neon.output_max);
vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
if (nc >= 16) {
vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 16;
} else {
int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
if (nc & 8) {
vst1_s8(c0, vout0x01234567); c0 += 8;
vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
}
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 19,818
| 54.360335
| 107
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-1x16c2-minmax-rndnu-neon-mlal-ld4r.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/c2-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
// QS8 indirect-GEMM (IGEMM) microkernel: 1 output row (MR=1) x 16 output
// columns (NR=16), processing K in pairs of int8 values ("c2"), with
// rounding-shift ("rndnu") requantization. Activations are loaded with
// vld4_dup (hence "ld4r"); the main loop unrolls two K-blocks and fuses the
// second multiply via vmlal_s8 ("mlal").
//
// `a` is an array of `ks` pointers to activation fragments of length `kc`
// (indirection buffer); `w` packs per-channel int32 biases followed by the
// int8 weights in the order this kernel consumes them; `c` receives the
// quantized int8 output row. Pointers equal to `zero` are NOT offset by
// `a_offset` (they point at a shared zero buffer for padding).
void xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld4r(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (1 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // K is consumed two int8 values at a time, so round it up to a multiple of 2.
  kc = round_up_po2(kc, 2 * sizeof(int8_t));
  int8_t* c0 = c;
  do {
    // Initialize the 16 int32 accumulators from the packed per-channel biases.
    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
    size_t p = ks;
    do {
      const int8_t* restrict a0 = a[0];
      // The shared `zero` buffer is never rebased by a_offset.
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      a += 1;
      size_t k = kc;
      // Main loop: 16 bytes of K per iteration, split into two 8-byte halves
      // (x0/x1). Each vmull_s8 is followed by a vmlal_s8 on the second half
      // before the widening accumulate into int32.
      while (k >= 16 * sizeof(int8_t)) {
        // vld4_dup_s16 broadcasts four consecutive int16 (= 4 pairs of int8)
        // into the four .val[] lanes; each lane is one K-pair (c0..c3).
        const int16x4x4_t va0x0 = vld4_dup_s16((const void*)a0); a0 += 8;
        const int16x4x4_t va0x1 = vld4_dup_s16((const void*)a0); a0 += 8;
        // First-half weights for all 16 channels x 4 K-pairs.
        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89ABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89ABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89ABc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDEFc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89ABc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDEFc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        // --- K-pair c0 ---
        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va0x0.val[0]);
        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va0x1.val[0]);
        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
        // Second-half weights are loaded just-in-time between mull and mlal.
        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
        int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0x0, va0c0x0);
        const int8x8_t vb89ABc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x89ABc0 = vmlal_s8(vprod0x89ABc0, vb89ABc0x1, va0c0x1);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
        int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0x0, va0c0x0);
        const int8x8_t vbCDEFc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0xCDEFc0 = vmlal_s8(vprod0xCDEFc0, vbCDEFc0x1, va0c0x1);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
        // --- K-pair c1 ---
        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va0x0.val[1]);
        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va0x1.val[1]);
        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
        int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1x0, va0c1x0);
        const int8x8_t vb89ABc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x89ABc1 = vmlal_s8(vprod0x89ABc1, vb89ABc1x1, va0c1x1);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
        int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1x0, va0c1x0);
        const int8x8_t vbCDEFc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0xCDEFc1 = vmlal_s8(vprod0xCDEFc1, vbCDEFc1x1, va0c1x1);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
        // --- K-pair c2 ---
        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va0x0.val[2]);
        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va0x1.val[2]);
        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
        int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2x0, va0c2x0);
        const int8x8_t vb89ABc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x89ABc2 = vmlal_s8(vprod0x89ABc2, vb89ABc2x1, va0c2x1);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
        int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2x0, va0c2x0);
        const int8x8_t vbCDEFc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0xCDEFc2 = vmlal_s8(vprod0xCDEFc2, vbCDEFc2x1, va0c2x1);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
        // --- K-pair c3 ---
        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va0x0.val[3]);
        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va0x1.val[3]);
        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
        int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3x0, va0c3x0);
        const int8x8_t vb89ABc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x89ABc3 = vmlal_s8(vprod0x89ABc3, vb89ABc3x1, va0c3x1);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
        int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3x0, va0c3x0);
        const int8x8_t vbCDEFc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0xCDEFc3 = vmlal_s8(vprod0xCDEFc3, vbCDEFc3x1, va0c3x1);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
        k -= 16 * sizeof(int8_t);
      }
      // Single 8-byte K-block (no mlal fusion): plain vmull per K-pair.
      if (k >= 8 * sizeof(int8_t)) {
        const int16x4x4_t va0 = vld4_dup_s16((const void*)a0); a0 += 8;
        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t va0c0 = vreinterpret_s8_s16(va0.val[0]);
        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
        const int8x8_t va0c1 = vreinterpret_s8_s16(va0.val[1]);
        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
        const int8x8_t va0c2 = vreinterpret_s8_s16(va0.val[2]);
        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
        const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
        const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
        const int8x8_t va0c3 = vreinterpret_s8_s16(va0.val[3]);
        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
        const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
        const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
        k -= 8 * sizeof(int8_t);
      }
      // Remainder: 2, 4, or 6 leftover bytes of K (kc was rounded to even).
      // One over-read of up to 8 bytes is permitted (function is XNN_OOB_READS);
      // only the valid K-pairs are multiplied.
      if XNN_UNLIKELY(k != 0) {
        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        // Broadcast K-pair 0 (lanes replicated via vdup_lane_s16).
        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
        if (k > 2 * sizeof(int8_t)) {
          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
          const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
          const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
          const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
          const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
          if (k > 4 * sizeof(int8_t)) {
            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
            const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
            const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
            const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
            vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
            const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
            vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
          }
        }
      }
      p -= 1 * sizeof(void*);
    } while (p != 0);
    // Requantization (rndnu): saturating pre-shift, doubling-high multiply,
    // then rounding post-shift.
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
    vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);
    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
    // Narrow int32 -> int16 -> int8 with saturation, adding the output zero
    // point at int16 precision. AArch64 uses the *_high forms to avoid moves.
#if XNN_ARCH_ARM64
    int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
    int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
    int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
    int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
    // Clamp to the requested output range.
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
    if (nc >= 16) {
      // Full 16-column store; rewind `a` by ks pointers for the next column tile.
      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      a = (const int8_t**restrict) ((uintptr_t) a - ks);
      nc -= 16;
    } else {
      // Partial store of the final 1..15 columns in 8/4/2/1 pieces.
      int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
      if (nc & 8) {
        vst1_s8(c0, vout0x01234567); c0 += 8;
        vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
      }
      if (nc & 4) {
        vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
      }
      if (nc & 2) {
        vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
      }
      if (nc & 1) {
        vst1_lane_s8(c0, vout0x01234567, 0);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 19,589
| 54.183099
| 107
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-1x16c2-minmax-rndnu-neon-mull-dup.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/c2-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_dup(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const int8x8_t va0 = vld1_s8(a0); a0 += 8;
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
const int8x8_t va0c3 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 3));
const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
}
}
}
p -= 1 * sizeof(void*);
} while (p != 0);
const int32x4_t vright_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->rndnu_neon.output_min);
vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->rndnu_neon.output_max);
vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
if (nc >= 16) {
vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 16;
} else {
int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
if (nc & 8) {
vst1_s8(c0, vout0x01234567); c0 += 8;
vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
}
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 12,452
| 47.835294
| 107
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-1x16c2-minmax-rndnu-neon-mull-ld1r.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/c2-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
// QS8 indirect-GEMM micro-kernel: MR=1 row, NR=16 output columns, KR=2
// (weights packed in groups of 2 along K), NEON VMULL.S8 multiply path.
// "ld1r" variant: the main loop loads the activation row as four 16-bit
// broadcast values (vld1_dup_s16), one per K-pair.
// Requantization is "rndnu": saturating pre-shift, saturating doubling
// multiply-high, rounding post-shift, zero-point add, saturating narrow,
// then clamping to [output_min, output_max].
void xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (1 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // K is consumed in pairs of int8, so round KC up to a multiple of 2.
  kc = round_up_po2(kc, 2 * sizeof(int8_t));
  int8_t* c0 = c;
  do {
    // Load 16 per-column int32 accumulators from the start of the packed
    // weights (presumably the bias values in the packed layout — the packer
    // is not visible here).
    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
    size_t p = ks;
    do {
      // Indirection buffer: 'zero' marks a padding row and is used as-is;
      // real row pointers are rebased by a_offset.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      a += 1;
      size_t k = kc;
      // Main loop: 8 K-elements (4 pairs) per iteration.
      while (k >= 8 * sizeof(int8_t)) {
        // Broadcast each of the four 16-bit (2 x int8) pairs of the row.
        const int16x4_t va00 = vld1_dup_s16((const void*)a0);
        const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
        const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
        const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
        // 16 weight vectors: 16 columns x 4 K-pairs, 8 bytes each.
        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        // For each K-pair: widening s8 multiply, then pairwise add-accumulate
        // into the s32 accumulators (vpadalq_s16 sums the 2 K contributions).
        const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
        const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
        const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
        const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
        const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
        const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
        const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
        const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
        k -= 8 * sizeof(int8_t);
      }
      // Remainder: 2, 4, or 6 K-elements (kc is a multiple of 2). The vld1_s8
      // may read past the valid K bytes — permitted by XNN_OOB_READS.
      if XNN_UNLIKELY(k != 0) {
        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        // Remainder path broadcasts K-pairs by lane duplication instead of
        // ld1r loads.
        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
        if (k > 2 * sizeof(int8_t)) {
          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
          const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
          const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
          const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
          const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
          if (k > 4 * sizeof(int8_t)) {
            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
            const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
            const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
            const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
            vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
            const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
            vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
          }
        }
      }
      p -= 1 * sizeof(void*);
    } while (p != 0);
    // rndnu requantization: saturating pre-shift, saturating doubling
    // multiply-high by the fixed-point multiplier, rounding post-shift.
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
    vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);
    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    // AArch64 has the *_high narrowing forms, saving a vcombine.
    int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
    int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
    int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
    int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
    // Clamp to the requested output range.
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
    if (nc >= 16) {
      // Full 16-column tile: store and rewind the indirection buffer for the
      // next N tile.
      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      a = (const int8_t**restrict) ((uintptr_t) a - ks);
      nc -= 16;
    } else {
      // Tail: store 8/4/2/1 bytes according to the remaining column count.
      int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
      if (nc & 8) {
        vst1_s8(c0, vout0x01234567); c0 += 8;
        vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
      }
      if (nc & 4) {
        vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
      }
      if (nc & 2) {
        vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
      }
      if (nc & 1) {
        vst1_lane_s8(c0, vout0x01234567, 0);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 12,525
| 47.550388
| 107
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-1x16c2-minmax-rndnu-neon-mull-ld2r.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/c2-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
// QS8 indirect-GEMM micro-kernel: MR=1 row, NR=16 output columns, KR=2
// (weights packed in groups of 2 along K), NEON VMULL.S8 multiply path.
// "ld2r" variant: the main loop loads the activation row with two
// vld2_dup_s16 de-interleaving broadcast loads (each yields two K-pair
// broadcasts in .val[0]/.val[1]).
// Requantization is "rndnu": saturating pre-shift, saturating doubling
// multiply-high, rounding post-shift, zero-point add, saturating narrow,
// then clamping to [output_min, output_max].
void xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (1 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // K is consumed in pairs of int8, so round KC up to a multiple of 2.
  kc = round_up_po2(kc, 2 * sizeof(int8_t))
;  int8_t* c0 = c;
  do {
    // Load 16 per-column int32 accumulators from the start of the packed
    // weights (presumably the bias values in the packed layout — the packer
    // is not visible here).
    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
    size_t p = ks;
    do {
      // Indirection buffer: 'zero' marks a padding row and is used as-is;
      // real row pointers are rebased by a_offset.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      a += 1;
      size_t k = kc;
      // Main loop: 8 K-elements (4 pairs) per iteration.
      while (k >= 8 * sizeof(int8_t)) {
        // Two de-interleaving broadcast loads: va00 holds K-pairs 0 and 1,
        // va01 holds K-pairs 2 and 3.
        const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
        const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
        // 16 weight vectors: 16 columns x 4 K-pairs, 8 bytes each.
        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        // For each K-pair: widening s8 multiply, then pairwise add-accumulate
        // into the s32 accumulators (vpadalq_s16 sums the 2 K contributions).
        const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
        const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
        const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
        const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
        const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
        const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
        const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
        const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
        k -= 8 * sizeof(int8_t);
      }
      // Remainder: 2, 4, or 6 K-elements (kc is a multiple of 2). The vld1_s8
      // may read past the valid K bytes — permitted by XNN_OOB_READS.
      if XNN_UNLIKELY(k != 0) {
        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        // Remainder path broadcasts K-pairs by lane duplication instead of
        // ld2r loads.
        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
        if (k > 2 * sizeof(int8_t)) {
          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
          const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
          const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
          const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
          const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
          if (k > 4 * sizeof(int8_t)) {
            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
            const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
            const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
            const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
            vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
            const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
            vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
          }
        }
      }
      p -= 1 * sizeof(void*);
    } while (p != 0);
    // rndnu requantization: saturating pre-shift, saturating doubling
    // multiply-high by the fixed-point multiplier, rounding post-shift.
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
    vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);
    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    // AArch64 has the *_high narrowing forms, saving a vcombine.
    int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
    int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
    int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
    int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
    // Clamp to the requested output range.
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
    if (nc >= 16) {
      // Full 16-column tile: store and rewind the indirection buffer for the
      // next N tile.
      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      a = (const int8_t**restrict) ((uintptr_t) a - ks);
      nc -= 16;
    } else {
      // Tail: store 8/4/2/1 bytes according to the remaining column count.
      int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
      if (nc & 8) {
        vst1_s8(c0, vout0x01234567); c0 += 8;
        vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
      }
      if (nc & 4) {
        vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
      }
      if (nc & 2) {
        vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
      }
      if (nc & 1) {
        vst1_lane_s8(c0, vout0x01234567, 0);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 12,421
| 47.523438
| 107
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-1x16c2-minmax-rndnu-neon-mull-ld4r.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/c2-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
// QS8 IGEMM microkernel: MR=1 row x NR=16 columns, reduction in groups of
// 2 channels (c2), using NEON vmull_s8 with vld4_dup-style activation loads.
// Requantization scheme: "rndnu" (pre-shift, saturating doubling multiply,
// rounding post-shift), followed by zero-point add and int8 clamping.
// NOTE: this is a transcription of the auto-generated kernel with one textual
// defect repaired: '&params' had been mojibake-garbled to '¶ms', which
// does not compile.
void xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld4r(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (1 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Round the reduction dimension up to a multiple of 2 bytes (the c2 grouping).
  kc = round_up_po2(kc, 2 * sizeof(int8_t));
  int8_t* c0 = c;

  do {
    // Initialize the 16 output accumulators from the packed per-channel biases.
    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));

    size_t p = ks;
    do {
      // Fetch the next input row pointer from the indirection buffer;
      // the 'zero' sentinel marks zero-padding rows and must not be offset.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      a += 1;

      size_t k = kc;
      // Main loop: consume 8 input channels per iteration (4 c2 groups).
      while (k >= 8 * sizeof(int8_t)) {
        // Load 8 activation bytes as four duplicated int16x4 lanes (one per c2 group).
        const int16x4x4_t va0 = vld4_dup_s16((const void*)a0); a0 += 8;

        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

        // Group c0: widening multiply then pairwise-add-accumulate into int32 lanes.
        const int8x8_t va0c0 = vreinterpret_s8_s16(va0.val[0]);
        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
        // Group c1.
        const int8x8_t va0c1 = vreinterpret_s8_s16(va0.val[1]);
        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
        // Group c2.
        const int8x8_t va0c2 = vreinterpret_s8_s16(va0.val[2]);
        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
        const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
        const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
        // Group c3.
        const int8x8_t va0c3 = vreinterpret_s8_s16(va0.val[3]);
        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
        const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
        const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);

        k -= 8 * sizeof(int8_t);
      }

      // Remainder: 2, 4 or 6 channels left (kc was rounded to a multiple of 2).
      if XNN_UNLIKELY(k != 0) {
        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);

        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);

        if (k > 2 * sizeof(int8_t)) {
          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
          const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
          const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
          const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
          const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);

          if (k > 4 * sizeof(int8_t)) {
            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
            const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
            const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
            const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
            vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
            const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
            vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
          }
        }
      }
      p -= 1 * sizeof(void*);
    } while (p != 0);

    // Requantize: saturating pre-shift, doubling-high multiply, rounding post-shift.
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
    vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);

    // Narrow to int16 with zero-point add, then to int8.
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
    int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);

    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
    int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
    int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);

    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif

    // Clamp to [output_min, output_max].
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);

    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);

    if (nc >= 16) {
      // Full 16-column store; rewind the indirection buffer for the next tile.
      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      a = (const int8_t**restrict) ((uintptr_t) a - ks);
      nc -= 16;
    } else {
      // Partial store: emit 8/4/2/1 lanes according to the bits of nc.
      int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
      if (nc & 8) {
        vst1_s8(c0, vout0x01234567); c0 += 8;
        vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
      }
      if (nc & 4) {
        vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
      }
      if (nc & 2) {
        vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
      }
      if (nc & 1) {
        vst1_lane_s8(c0, vout0x01234567, 0);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 12,346
| 47.419608
| 107
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-1x16c2s4-minmax-rndnu-neon-mlal.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/c2-neon-mull-shuffle.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
// QS8 IGEMM microkernel: MR=1 row x NR=16 columns, c2s4 shuffle layout
// (2-channel groups rotated with vext), using vmull_s8 + vmlal_s8 to process
// 16 input channels per main-loop iteration. "rndnu" requantization.
// NOTE: transcription of the auto-generated kernel with one textual defect
// repaired: '&params' had been mojibake-garbled to '¶ms' (non-compiling).
void xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2s4__neon_mlal(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (1 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  int8_t* c0 = c;

  // Reduction dimension is padded to a multiple of 8 bytes by the packer.
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  do {
    // Initialize accumulators from the packed per-channel biases.
    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const int32_t*) w + 4;
    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const int32_t*) w + 4;

    size_t p = ks;
    do {
      // Next input row pointer; the 'zero' sentinel (padding) is never offset.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      a += 1;

      size_t k = kc;
      // Main loop: two 8-byte activation vectors (16 channels) per iteration,
      // combined with vmull + vmlal into each accumulator.
      while (k >= 16 * sizeof(int8_t)) {
        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
        int8x8_t va0x1 = vld1_s8(a0); a0 += 8;

        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb89ABc0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vbCDEFc0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb89ABc1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vbCDEFc1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb89ABc2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vbCDEFc2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb89ABc3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vbCDEFc3x0 = vld1_s8(w); w = (const int8_t*) w + 8;

        // Group c0: first half via vmull, second half fused in via vmlal.
        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0x0);
        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0x1);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0x0);
        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0x1);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
        int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0x0, va0x0);
        const int8x8_t vb89ABc0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod0x89ABc0 = vmlal_s8(vprod0x89ABc0, vb89ABc0x1, va0x1);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
        int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0x0, va0x0);
        const int8x8_t vbCDEFc0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod0xCDEFc0 = vmlal_s8(vprod0xCDEFc0, vbCDEFc0x1, va0x1);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
        // Rotate activations by 2 bytes to align the next c2 group.
        va0x0 = vext_s8(va0x0, va0x0, 2);
        va0x1 = vext_s8(va0x1, va0x1, 2);
        // Group c1.
        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0x0);
        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0x1);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0x0);
        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0x1);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
        int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1x0, va0x0);
        const int8x8_t vb89ABc1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod0x89ABc1 = vmlal_s8(vprod0x89ABc1, vb89ABc1x1, va0x1);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
        int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1x0, va0x0);
        const int8x8_t vbCDEFc1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod0xCDEFc1 = vmlal_s8(vprod0xCDEFc1, vbCDEFc1x1, va0x1);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
        va0x0 = vext_s8(va0x0, va0x0, 2);
        va0x1 = vext_s8(va0x1, va0x1, 2);
        // Group c2.
        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0x0);
        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0x1);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0x0);
        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0x1);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
        int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2x0, va0x0);
        const int8x8_t vb89ABc2x1 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod0x89ABc2 = vmlal_s8(vprod0x89ABc2, vb89ABc2x1, va0x1);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
        int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2x0, va0x0);
        const int8x8_t vbCDEFc2x1 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod0xCDEFc2 = vmlal_s8(vprod0xCDEFc2, vbCDEFc2x1, va0x1);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
        va0x0 = vext_s8(va0x0, va0x0, 2);
        va0x1 = vext_s8(va0x1, va0x1, 2);
        // Group c3.
        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0x0);
        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0x1);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0x0);
        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0x1);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
        int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3x0, va0x0);
        const int8x8_t vb89ABc3x1 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod0x89ABc3 = vmlal_s8(vprod0x89ABc3, vb89ABc3x1, va0x1);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
        int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3x0, va0x0);
        const int8x8_t vbCDEFc3x1 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod0xCDEFc3 = vmlal_s8(vprod0xCDEFc3, vbCDEFc3x1, va0x1);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);

        k -= 16 * sizeof(int8_t);
      }

      // Remainder: exactly 8 channels left (kc is a multiple of 8), mull only.
      if (k != 0) {
        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;

        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb89ABc0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vbCDEFc0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb89ABc1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vbCDEFc1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb89ABc2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vbCDEFc2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb89ABc3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vbCDEFc3x0 = vld1_s8(w); w = (const int8_t*) w + 8;

        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0x0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0x0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
        int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0x0, va0x0);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
        int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0x0, va0x0);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
        va0x0 = vext_s8(va0x0, va0x0, 2);
        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0x0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0x0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
        int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1x0, va0x0);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
        int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1x0, va0x0);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
        va0x0 = vext_s8(va0x0, va0x0, 2);
        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0x0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0x0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
        int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2x0, va0x0);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
        int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2x0, va0x0);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
        va0x0 = vext_s8(va0x0, va0x0, 2);
        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0x0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0x0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
        int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3x0, va0x0);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
        int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3x0, va0x0);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
      }

      p -= 1 * sizeof(void*);
    } while (p != 0);

    // Requantize: saturating pre-shift, doubling-high multiply, rounding post-shift.
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
    vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);

    // Narrow to int16 with zero-point add, then to int8.
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
    int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);

    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
    int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
    int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);

    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif

    // Clamp to [output_min, output_max].
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);

    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);

    if (nc >= 16) {
      // Full 16-column store; rewind the indirection buffer for the next tile.
      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      a = (const int8_t**restrict) ((uintptr_t) a - ks);
      nc -= 16;
    } else {
      // Partial store: emit 8/4/2/1 lanes according to the bits of nc.
      int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
      if (nc & 8) {
        vst1_s8(c0, vout0x01234567); c0 += 8;
        vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
      }
      if (nc & 4) {
        vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
      }
      if (nc & 2) {
        vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
      }
      if (nc & 1) {
        vst1_lane_s8(c0, vout0x01234567, 0);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 14,069
| 47.517241
| 107
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-1x16c2s4-minmax-rndnu-neon-mull.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/c2-neon-mull-shuffle.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
// QS8 IGEMM microkernel: MR=1 row x NR=16 columns, c2s4 shuffle layout
// (2-channel groups rotated with vext), vmull_s8-only variant processing
// 8 input channels per loop iteration. "rndnu" requantization.
// NOTE: transcription of the auto-generated kernel with one textual defect
// repaired: '&params' had been mojibake-garbled to '¶ms' (non-compiling).
void xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2s4__neon_mull(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (1 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  int8_t* c0 = c;

  // Reduction dimension is padded to a multiple of 8 bytes by the packer.
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  do {
    // Initialize accumulators from the packed per-channel biases.
    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const int32_t*) w + 4;
    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const int32_t*) w + 4;

    size_t p = ks;
    do {
      // Next input row pointer; the 'zero' sentinel (padding) is never offset.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      a += 1;

      size_t k = kc;
      // Loop over the reduction dimension, 8 channels (4 c2 groups) at a time.
      do {
        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;

        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb89ABc0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vbCDEFc0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb89ABc1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vbCDEFc1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb89ABc2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vbCDEFc2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb89ABc3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vbCDEFc3x0 = vld1_s8(w); w = (const int8_t*) w + 8;

        // Group c0: widening multiply, pairwise-add-accumulate into int32 lanes.
        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0x0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0x0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
        int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0x0, va0x0);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
        int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0x0, va0x0);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
        // Rotate activations by 2 bytes to align the next c2 group.
        va0x0 = vext_s8(va0x0, va0x0, 2);
        // Group c1.
        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0x0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0x0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
        int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1x0, va0x0);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
        int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1x0, va0x0);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
        va0x0 = vext_s8(va0x0, va0x0, 2);
        // Group c2.
        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0x0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0x0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
        int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2x0, va0x0);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
        int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2x0, va0x0);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
        va0x0 = vext_s8(va0x0, va0x0, 2);
        // Group c3.
        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0x0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0x0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
        int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3x0, va0x0);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
        int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3x0, va0x0);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);

        k -= 8 * sizeof(int8_t);
      } while (k != 0);

      p -= 1 * sizeof(void*);
    } while (p != 0);

    // Requantize: saturating pre-shift, doubling-high multiply, rounding post-shift.
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
    vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);

    // Narrow to int16 with zero-point add, then to int8.
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
    int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);

    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
    int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
    int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);

    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif

    // Clamp to [output_min, output_max].
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);

    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);

    if (nc >= 16) {
      // Full 16-column store; rewind the indirection buffer for the next tile.
      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      a = (const int8_t**restrict) ((uintptr_t) a - ks);
      nc -= 16;
    } else {
      // Partial store: emit 8/4/2/1 lanes according to the bits of nc.
      int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
      if (nc & 8) {
        vst1_s8(c0, vout0x01234567); c0 += 8;
        vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
      }
      if (nc & 4) {
        vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
      }
      if (nc & 2) {
        vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
      }
      if (nc & 1) {
        vst1_lane_s8(c0, vout0x01234567, 0);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 8,205
| 40.654822
| 107
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-1x16c4-minmax-rndnu-neon-mlal-dup.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/c4-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4__neon_mlal_dup(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 4 * sizeof(int8_t));
int8_t* c0 = c;
do {
int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x89 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0xAB = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0xCD = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0xEF = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
while (k >= 16 * sizeof(int8_t)) {
const int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
const int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0x0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0x0), 0));
const int8x8_t va0c0x1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0x1), 0));
int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0c0x0);
const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0c0x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0c0x0);
const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0c0x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0c0x0);
const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0c0x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0c0x0);
const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0c0x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
int16x8_t vprod0x89c0 = vmull_s8(vb89c0x0, va0c0x0);
const int8x8_t vb89c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x89c0 = vmlal_s8(vprod0x89c0, vb89c0x1, va0c0x1);
vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
int16x8_t vprod0xABc0 = vmull_s8(vbABc0x0, va0c0x0);
const int8x8_t vbABc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xABc0 = vmlal_s8(vprod0xABc0, vbABc0x1, va0c0x1);
vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0x0, va0c0x0);
const int8x8_t vbCDc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xCDc0 = vmlal_s8(vprod0xCDc0, vbCDc0x1, va0c0x1);
vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0x0, va0c0x0);
const int8x8_t vbEFc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xEFc0 = vmlal_s8(vprod0xEFc0, vbEFc0x1, va0c0x1);
vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
const int8x8_t va0c1x0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0x0), 1));
const int8x8_t va0c1x1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0x1), 1));
int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0c1x0);
const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0c1x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0c1x0);
const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0c1x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0c1x0);
const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0c1x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0c1x0);
const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0c1x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
int16x8_t vprod0x89c1 = vmull_s8(vb89c1x0, va0c1x0);
const int8x8_t vb89c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x89c1 = vmlal_s8(vprod0x89c1, vb89c1x1, va0c1x1);
vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
int16x8_t vprod0xABc1 = vmull_s8(vbABc1x0, va0c1x0);
const int8x8_t vbABc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xABc1 = vmlal_s8(vprod0xABc1, vbABc1x1, va0c1x1);
vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1x0, va0c1x0);
const int8x8_t vbCDc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xCDc1 = vmlal_s8(vprod0xCDc1, vbCDc1x1, va0c1x1);
vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1x0, va0c1x0);
const int8x8_t vbEFc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xEFc1 = vmlal_s8(vprod0xEFc1, vbEFc1x1, va0c1x1);
vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
k -= 16 * sizeof(int8_t);
}
if (k >= 8 * sizeof(int8_t)) {
const int8x8_t va0 = vld1_s8(a0); a0 += 8;
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int16x8_t vprod0x89c0 = vmull_s8(vb89c0, va0c0);
vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
const int16x8_t vprod0xABc0 = vmull_s8(vbABc0, va0c0);
vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
const int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0, va0c0);
vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
const int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0, va0c0);
vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
const int16x8_t vprod0x89c1 = vmull_s8(vb89c1, va0c1);
vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
const int16x8_t vprod0xABc1 = vmull_s8(vbABc1, va0c1);
vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
const int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1, va0c1);
vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
const int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1, va0c1);
vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int16x8_t vprod0x89c0 = vmull_s8(vb89c0, va0c0);
vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
const int16x8_t vprod0xABc0 = vmull_s8(vbABc0, va0c0);
vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
const int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0, va0c0);
vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
const int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0, va0c0);
vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
}
p -= 1 * sizeof(void*);
} while (p != 0);
#if XNN_ARCH_ARM64
int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
int32x4_t vacc0x89AB = vpaddq_s32(vacc0x89, vacc0xAB);
int32x4_t vacc0xCDEF = vpaddq_s32(vacc0xCD, vacc0xEF);
#else
const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
const int32x2_t vsum0x89 = vpadd_s32(vget_low_s32(vacc0x89), vget_high_s32(vacc0x89));
const int32x2_t vsum0xAB = vpadd_s32(vget_low_s32(vacc0xAB), vget_high_s32(vacc0xAB));
int32x4_t vacc0x89AB = vcombine_s32(vsum0x89, vsum0xAB);
const int32x2_t vsum0xCD = vpadd_s32(vget_low_s32(vacc0xCD), vget_high_s32(vacc0xCD));
const int32x2_t vsum0xEF = vpadd_s32(vget_low_s32(vacc0xEF), vget_high_s32(vacc0xEF));
int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF);
#endif
const int32x4_t vright_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->rndnu_neon.output_min);
vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->rndnu_neon.output_max);
vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
if (nc >= 16) {
vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 16;
} else {
int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
if (nc & 8) {
vst1_s8(c0, vout0x01234567); c0 += 8;
vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
}
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 19,293
| 54.442529
| 128
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-1x16c4-minmax-rndnu-neon-mlal-ld1r.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/c4-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
// QS8 indirect-GEMM micro-kernel: 1 output row (MR = 1) x 16 output columns
// (NR = 16), with kc processed 4 bytes at a time ("c4") using widening NEON
// multiplies (vmull_s8 / vmlal_s8) and duplicated 32-bit activation loads
// ("ld1r" flavor).  Accumulation is done in 8 paired-lane int32x4 registers
// (vacc0x01 .. vacc0xEF) that are pairwise-reduced to 16 int32 lanes before
// rndnu requantization (saturating pre-shift, doubling high-half multiply,
// rounding post-shift) and clamping to [output_min, output_max].
//
// Parameters follow the standard XNNPACK igemm contract:
//   mr/nc/kc  - tile rows, remaining output channels, reduction length (bytes)
//   ks        - indirection-buffer length in bytes (multiple of one pointer)
//   a         - array of ks/sizeof(void*) pointers to activation rows
//   w         - packed weights: 16 int32 biases then interleaved int8 weights
//   c         - output pointer; cm_stride unused here since MR == 1
//   a_offset  - byte offset applied to every non-`zero` activation pointer
//   zero      - sentinel row of zeros; pointers equal to it are NOT offset
void xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4__neon_mlal_ld1r(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
// The packed-weight layout assumes kc is a multiple of 4 (the "c4" group).
kc = round_up_po2(kc, 4 * sizeof(int8_t));
int8_t* c0 = c;
do {
// Load 16 int32 biases, widened two-at-a-time into paired-lane accumulators:
// each vacc0xNM holds two partial sums that are reduced pairwise at the end.
int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x89 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0xAB = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0xCD = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0xEF = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
size_t p = ks;
do {
// Next activation pointer from the indirection buffer; the shared `zero`
// row is used as-is (padding), real rows get the a_offset rebase.
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
// Main loop: 16 bytes of k per iteration, two mull+mlal rounds folded
// into each 32-bit accumulator via vpadalq_s16.
while (k >= 16 * sizeof(int8_t)) {
// ld1r-style loads: each vld1_dup_s32 broadcasts one 4-byte activation
// group across both lanes so it can multiply an 8-wide weight vector.
const int32x2_t va00x0 = vld1_dup_s32((const void*)a0);
const int32x2_t va01x0 = vld1_dup_s32((const void*)(a0 + 4)); a0 += 8;
const int32x2_t va00x1 = vld1_dup_s32((const void*)a0);
const int32x2_t va01x1 = vld1_dup_s32((const void*)(a0 + 4)); a0 += 8;
const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
// Group c0 (first 4 activation bytes): vmull on the x0 half, vmlal the
// x1 half on top, then pairwise-accumulate the int16 products.
const int8x8_t va0c0x0 = vreinterpret_s8_s32(va00x0);
const int8x8_t va0c0x1 = vreinterpret_s8_s32(va00x1);
int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0c0x0);
const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0c0x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0c0x0);
const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0c0x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0c0x0);
const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0c0x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0c0x0);
const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0c0x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
int16x8_t vprod0x89c0 = vmull_s8(vb89c0x0, va0c0x0);
const int8x8_t vb89c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x89c0 = vmlal_s8(vprod0x89c0, vb89c0x1, va0c0x1);
vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
int16x8_t vprod0xABc0 = vmull_s8(vbABc0x0, va0c0x0);
const int8x8_t vbABc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xABc0 = vmlal_s8(vprod0xABc0, vbABc0x1, va0c0x1);
vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0x0, va0c0x0);
const int8x8_t vbCDc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xCDc0 = vmlal_s8(vprod0xCDc0, vbCDc0x1, va0c0x1);
vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0x0, va0c0x0);
const int8x8_t vbEFc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xEFc0 = vmlal_s8(vprod0xEFc0, vbEFc0x1, va0c0x1);
vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
// Group c1 (second 4 activation bytes), same structure as c0.
const int8x8_t va0c1x0 = vreinterpret_s8_s32(va01x0);
const int8x8_t va0c1x1 = vreinterpret_s8_s32(va01x1);
int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0c1x0);
const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0c1x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0c1x0);
const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0c1x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0c1x0);
const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0c1x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0c1x0);
const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0c1x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
int16x8_t vprod0x89c1 = vmull_s8(vb89c1x0, va0c1x0);
const int8x8_t vb89c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x89c1 = vmlal_s8(vprod0x89c1, vb89c1x1, va0c1x1);
vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
int16x8_t vprod0xABc1 = vmull_s8(vbABc1x0, va0c1x0);
const int8x8_t vbABc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xABc1 = vmlal_s8(vprod0xABc1, vbABc1x1, va0c1x1);
vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1x0, va0c1x0);
const int8x8_t vbCDc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xCDc1 = vmlal_s8(vprod0xCDc1, vbCDc1x1, va0c1x1);
vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1x0, va0c1x0);
const int8x8_t vbEFc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xEFc1 = vmlal_s8(vprod0xEFc1, vbEFc1x1, va0c1x1);
vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
k -= 16 * sizeof(int8_t);
}
// Single 8-byte round (no mlal pairing) when 8 <= k < 16.
if (k >= 8 * sizeof(int8_t)) {
const int32x2_t va00 = vld1_dup_s32((const void*)a0);
const int32x2_t va01 = vld1_dup_s32((const void*)(a0 + 4)); a0 += 8;
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(va00);
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int16x8_t vprod0x89c0 = vmull_s8(vb89c0, va0c0);
vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
const int16x8_t vprod0xABc0 = vmull_s8(vbABc0, va0c0);
vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
const int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0, va0c0);
vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
const int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0, va0c0);
vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
const int8x8_t va0c1 = vreinterpret_s8_s32(va01);
const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
const int16x8_t vprod0x89c1 = vmull_s8(vb89c1, va0c1);
vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
const int16x8_t vprod0xABc1 = vmull_s8(vbABc1, va0c1);
vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
const int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1, va0c1);
vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
const int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1, va0c1);
vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
k -= 8 * sizeof(int8_t);
}
// Remainder (k < 8): only the c0 weight group is consumed; the vld1_s8
// may read past the row end, permitted by the XNN_OOB_READS annotation.
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int16x8_t vprod0x89c0 = vmull_s8(vb89c0, va0c0);
vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
const int16x8_t vprod0xABc0 = vmull_s8(vbABc0, va0c0);
vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
const int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0, va0c0);
vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
const int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0, va0c0);
vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
}
p -= 1 * sizeof(void*);
} while (p != 0);
// Pairwise-reduce the 8 paired-lane accumulators into 16 int32 lanes;
// AArch64 has a full-width vpaddq, AArch32 composes it from vpadd halves.
#if XNN_ARCH_ARM64
int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
int32x4_t vacc0x89AB = vpaddq_s32(vacc0x89, vacc0xAB);
int32x4_t vacc0xCDEF = vpaddq_s32(vacc0xCD, vacc0xEF);
#else
const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
const int32x2_t vsum0x89 = vpadd_s32(vget_low_s32(vacc0x89), vget_high_s32(vacc0x89));
const int32x2_t vsum0xAB = vpadd_s32(vget_low_s32(vacc0xAB), vget_high_s32(vacc0xAB));
int32x4_t vacc0x89AB = vcombine_s32(vsum0x89, vsum0xAB);
const int32x2_t vsum0xCD = vpadd_s32(vget_low_s32(vacc0xCD), vget_high_s32(vacc0xCD));
const int32x2_t vsum0xEF = vpadd_s32(vget_low_s32(vacc0xEF), vget_high_s32(vacc0xEF));
int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF);
#endif
// rndnu requantization: saturating left pre-shift, saturating doubling
// high-half multiply, then rounding right post-shift.
const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
// Narrow int32 -> int16 (saturating), add output zero point, narrow to
// int8 (saturating), then clamp to the requested output range.
const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
if (nc >= 16) {
// Full 16-column store; rewind the indirection buffer for the next tile.
vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 16;
} else {
// Tail store: emit 8/4/2/1-byte pieces, rotating the vector after each
// partial store so the next piece is in lane 0.
int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
if (nc & 8) {
vst1_s8(c0, vout0x01234567); c0 += 8;
vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
}
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 19,334
| 54.08547
| 128
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-1x16c4-minmax-rndnu-neon-mlal-ld2r.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/c4-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
// QS8 IGEMM (indirect GEMM) micro-kernel: MR=1 output row, NR=16 output
// channels, KR=4 (weights packed 4 K-elements per channel pair), rndnu
// requantization. NEON variant using 2x-unrolled MULL+MLAL with vld2_dup
// ("ld2r") activation loads.
//
// NOTE(review): auto-generated from src/qs8-igemm/c4-neon-mull-dup.c.in —
// change the template, not this file.
void xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4__neon_mlal_ld2r(
    size_t mr,                  // number of output rows to compute (must be 1)
    size_t nc,                  // number of output channels (columns of C)
    size_t kc,                  // reduction length per A pointer, in bytes
    size_t ks,                  // indirection-buffer span per output pixel, in bytes
    const int8_t** restrict a,  // indirection buffer of A row pointers
    const void* restrict w,     // packed weights: 16 int32 biases, then int8 weights
    int8_t* restrict c,         // output matrix C
    size_t cm_stride,           // C row stride, in bytes (unused when MR=1)
    size_t cn_stride,           // C stride between 16-channel groups, in bytes
    size_t a_offset,            // byte offset applied to non-`zero` A pointers
    const int8_t* zero,         // zero buffer pointer; entries equal to it skip a_offset
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (1 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // K is consumed in groups of 4 int8 elements (KR=4); round up to match the
  // weight packing.
  kc = round_up_po2(kc, 4 * sizeof(int8_t));
  int8_t* c0 = c;

  do {
    // Load the 16 per-channel int32 biases. Each int32x4_t holds two channel
    // accumulators: vld1_u32 reads 2 biases, vmovl_u32 widens them to 64-bit
    // lanes, so each channel occupies a 32-bit pair that is horizontally
    // summed (vpadd) after the K loop.
    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
    int32x4_t vacc0x89 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
    int32x4_t vacc0xAB = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
    int32x4_t vacc0xCD = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
    int32x4_t vacc0xEF = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));

    // Walk the indirection buffer: one A pointer per iteration (MR=1).
    size_t p = ks;
    do {
      const int8_t* restrict a0 = a[0];
      // Pointers equal to `zero` reference the shared zero buffer and must
      // not be offset.
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      a += 1;

      size_t k = kc;
      // Main loop: 16 K-elements per iteration, 2x-unrolled so each product
      // uses MULL for the first half and MLAL for the second before the
      // widening pairwise accumulate (vpadalq_s16).
      while (k >= 16 * sizeof(int8_t)) {
        // vld2_dup de-interleaves: val[0] holds K-group c0 (bytes 0-3)
        // splatted, val[1] holds K-group c1 (bytes 4-7) splatted.
        const int32x2x2_t va0x0 = vld2_dup_s32((const void*)a0); a0 += 8;
        const int32x2x2_t va0x1 = vld2_dup_s32((const void*)a0); a0 += 8;
        // Weights for K-group c0 (first unroll step), 2 channels per vector.
        const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        // Weights for K-group c1 (first unroll step).
        const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        // K-group c0: MULL with first-unroll weights, MLAL with the
        // second-unroll weights loaded just in time, then accumulate.
        const int8x8_t va0c0x0 = vreinterpret_s8_s32(va0x0.val[0]);
        const int8x8_t va0c0x1 = vreinterpret_s8_s32(va0x1.val[0]);
        int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0c0x0);
        const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0c0x1);
        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
        int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0c0x0);
        const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0c0x1);
        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
        int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0c0x0);
        const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0c0x1);
        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
        int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0c0x0);
        const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0c0x1);
        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
        int16x8_t vprod0x89c0 = vmull_s8(vb89c0x0, va0c0x0);
        const int8x8_t vb89c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x89c0 = vmlal_s8(vprod0x89c0, vb89c0x1, va0c0x1);
        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
        int16x8_t vprod0xABc0 = vmull_s8(vbABc0x0, va0c0x0);
        const int8x8_t vbABc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0xABc0 = vmlal_s8(vprod0xABc0, vbABc0x1, va0c0x1);
        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
        int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0x0, va0c0x0);
        const int8x8_t vbCDc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0xCDc0 = vmlal_s8(vprod0xCDc0, vbCDc0x1, va0c0x1);
        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
        int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0x0, va0c0x0);
        const int8x8_t vbEFc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0xEFc0 = vmlal_s8(vprod0xEFc0, vbEFc0x1, va0c0x1);
        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
        // K-group c1: same MULL/MLAL/accumulate pattern with the c1 splats.
        const int8x8_t va0c1x0 = vreinterpret_s8_s32(va0x0.val[1]);
        const int8x8_t va0c1x1 = vreinterpret_s8_s32(va0x1.val[1]);
        int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0c1x0);
        const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0c1x1);
        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
        int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0c1x0);
        const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0c1x1);
        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
        int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0c1x0);
        const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0c1x1);
        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
        int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0c1x0);
        const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0c1x1);
        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
        int16x8_t vprod0x89c1 = vmull_s8(vb89c1x0, va0c1x0);
        const int8x8_t vb89c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x89c1 = vmlal_s8(vprod0x89c1, vb89c1x1, va0c1x1);
        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
        int16x8_t vprod0xABc1 = vmull_s8(vbABc1x0, va0c1x0);
        const int8x8_t vbABc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0xABc1 = vmlal_s8(vprod0xABc1, vbABc1x1, va0c1x1);
        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
        int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1x0, va0c1x0);
        const int8x8_t vbCDc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0xCDc1 = vmlal_s8(vprod0xCDc1, vbCDc1x1, va0c1x1);
        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
        int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1x0, va0c1x0);
        const int8x8_t vbEFc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0xEFc1 = vmlal_s8(vprod0xEFc1, vbEFc1x1, va0c1x1);
        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
        k -= 16 * sizeof(int8_t);
      }
      // Remainder: one 8-element step (two K-groups), MULL only.
      if (k >= 8 * sizeof(int8_t)) {
        const int32x2x2_t va0 = vld2_dup_s32((const void*)a0); a0 += 8;
        const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t va0c0 = vreinterpret_s8_s32(va0.val[0]);
        const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
        const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
        const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
        const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
        const int16x8_t vprod0x89c0 = vmull_s8(vb89c0, va0c0);
        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
        const int16x8_t vprod0xABc0 = vmull_s8(vbABc0, va0c0);
        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
        const int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0, va0c0);
        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
        const int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0, va0c0);
        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
        const int8x8_t va0c1 = vreinterpret_s8_s32(va0.val[1]);
        const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
        const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
        const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
        const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
        const int16x8_t vprod0x89c1 = vmull_s8(vb89c1, va0c1);
        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
        const int16x8_t vprod0xABc1 = vmull_s8(vbABc1, va0c1);
        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
        const int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1, va0c1);
        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
        const int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1, va0c1);
        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
        k -= 8 * sizeof(int8_t);
      }
      // Final 1-7 leftover bytes: one K-group; kernel is declared
      // XNN_OOB_READS, so the 8-byte load may read past the logical end.
      if XNN_UNLIKELY(k != 0) {
        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
        const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
        const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
        const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
        const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
        const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
        const int16x8_t vprod0x89c0 = vmull_s8(vb89c0, va0c0);
        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
        const int16x8_t vprod0xABc0 = vmull_s8(vbABc0, va0c0);
        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
        const int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0, va0c0);
        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
        const int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0, va0c0);
        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
      }
      p -= 1 * sizeof(void*);
    } while (p != 0);

    // Horizontal reduction: collapse the 2-lane-per-channel accumulators
    // into one int32 per channel (AArch64 has a quad-width vpaddq).
#if XNN_ARCH_ARM64
    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
    int32x4_t vacc0x89AB = vpaddq_s32(vacc0x89, vacc0xAB);
    int32x4_t vacc0xCDEF = vpaddq_s32(vacc0xCD, vacc0xEF);
#else
    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
    const int32x2_t vsum0x89 = vpadd_s32(vget_low_s32(vacc0x89), vget_high_s32(vacc0x89));
    const int32x2_t vsum0xAB = vpadd_s32(vget_low_s32(vacc0xAB), vget_high_s32(vacc0xAB));
    int32x4_t vacc0x89AB = vcombine_s32(vsum0x89, vsum0xAB);
    const int32x2_t vsum0xCD = vpadd_s32(vget_low_s32(vacc0xCD), vget_high_s32(vacc0xCD));
    const int32x2_t vsum0xEF = vpadd_s32(vget_low_s32(vacc0xEF), vget_high_s32(vacc0xEF));
    int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF);
#endif

    // rndnu requantization: saturating pre-shift, doubling high-half
    // multiply, then rounding post-shift.
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
    vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);

    // Narrow int32 -> int16 (saturating), add the output zero point, then
    // narrow int16 -> int8 (saturating).
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
    int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
    int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
    int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
    // Clamp to the requested output range.
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);

    if (nc >= 16) {
      // Full 16-channel store; rewind the indirection buffer for the next
      // channel group.
      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 16;
    } else {
      // Partial tail: store 8/4/2/1 lanes according to the bits of nc.
      int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
      if (nc & 8) {
        vst1_s8(c0, vout0x01234567); c0 += 8;
        vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
      }
      if (nc & 4) {
        vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
      }
      if (nc & 2) {
        vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
      }
      if (nc & 1) {
        vst1_lane_s8(c0, vout0x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
| 19,165
| 54.074713
| 128
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-1x16c4-minmax-rndnu-neon-mull-dup.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/c4-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
// QS8 IGEMM (indirect GEMM) micro-kernel: MR=1 output row, NR=16 output
// channels, KR=4, rndnu requantization. NEON "mull-dup" variant: plain
// vld1_s8 activation loads with vdup_lane splats and MULL-only products
// (no MLAL unrolling).
//
// NOTE(review): auto-generated from src/qs8-igemm/c4-neon-mull-dup.c.in —
// change the template, not this file.
void xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4__neon_mull_dup(
    size_t mr,                  // number of output rows to compute (must be 1)
    size_t nc,                  // number of output channels (columns of C)
    size_t kc,                  // reduction length per A pointer, in bytes
    size_t ks,                  // indirection-buffer span per output pixel, in bytes
    const int8_t** restrict a,  // indirection buffer of A row pointers
    const void* restrict w,     // packed weights: 16 int32 biases, then int8 weights
    int8_t* restrict c,         // output matrix C
    size_t cm_stride,           // C row stride, in bytes (unused when MR=1)
    size_t cn_stride,           // C stride between 16-channel groups, in bytes
    size_t a_offset,            // byte offset applied to non-`zero` A pointers
    const int8_t* zero,         // zero buffer pointer; entries equal to it skip a_offset
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (1 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // K is consumed in groups of 4 int8 elements (KR=4); round up to match the
  // weight packing.
  kc = round_up_po2(kc, 4 * sizeof(int8_t));
  int8_t* c0 = c;

  do {
    // Load the 16 per-channel int32 biases. Each int32x4_t holds two channel
    // accumulators as widened 64-bit lanes; the pairs are horizontally summed
    // (vpadd) after the K loop.
    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
    int32x4_t vacc0x89 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
    int32x4_t vacc0xAB = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
    int32x4_t vacc0xCD = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
    int32x4_t vacc0xEF = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));

    // Walk the indirection buffer: one A pointer per iteration (MR=1).
    size_t p = ks;
    do {
      const int8_t* restrict a0 = a[0];
      // Pointers equal to `zero` reference the shared zero buffer and must
      // not be offset.
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      a += 1;

      size_t k = kc;
      // Main loop: 8 K-elements (two K-groups c0/c1) per iteration.
      while (k >= 8 * sizeof(int8_t)) {
        const int8x8_t va0 = vld1_s8(a0); a0 += 8;
        const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        // K-group c0: splat bytes 0-3 of the activations across the vector,
        // widening-multiply against each channel pair, pairwise-accumulate.
        const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
        const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
        const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
        const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
        const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
        const int16x8_t vprod0x89c0 = vmull_s8(vb89c0, va0c0);
        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
        const int16x8_t vprod0xABc0 = vmull_s8(vbABc0, va0c0);
        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
        const int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0, va0c0);
        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
        const int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0, va0c0);
        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
        // K-group c1: same pattern with bytes 4-7 splatted.
        const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
        const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
        const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
        const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
        const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
        const int16x8_t vprod0x89c1 = vmull_s8(vb89c1, va0c1);
        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
        const int16x8_t vprod0xABc1 = vmull_s8(vbABc1, va0c1);
        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
        const int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1, va0c1);
        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
        const int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1, va0c1);
        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
        k -= 8 * sizeof(int8_t);
      }
      // Final 1-7 leftover bytes: one K-group; kernel is declared
      // XNN_OOB_READS, so the 8-byte load may read past the logical end.
      if XNN_UNLIKELY(k != 0) {
        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
        const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
        const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
        const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
        const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
        const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
        const int16x8_t vprod0x89c0 = vmull_s8(vb89c0, va0c0);
        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
        const int16x8_t vprod0xABc0 = vmull_s8(vbABc0, va0c0);
        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
        const int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0, va0c0);
        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
        const int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0, va0c0);
        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
      }
      p -= 1 * sizeof(void*);
    } while (p != 0);

    // Horizontal reduction: collapse the 2-lane-per-channel accumulators
    // into one int32 per channel (AArch64 has a quad-width vpaddq).
#if XNN_ARCH_ARM64
    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
    int32x4_t vacc0x89AB = vpaddq_s32(vacc0x89, vacc0xAB);
    int32x4_t vacc0xCDEF = vpaddq_s32(vacc0xCD, vacc0xEF);
#else
    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
    const int32x2_t vsum0x89 = vpadd_s32(vget_low_s32(vacc0x89), vget_high_s32(vacc0x89));
    const int32x2_t vsum0xAB = vpadd_s32(vget_low_s32(vacc0xAB), vget_high_s32(vacc0xAB));
    int32x4_t vacc0x89AB = vcombine_s32(vsum0x89, vsum0xAB);
    const int32x2_t vsum0xCD = vpadd_s32(vget_low_s32(vacc0xCD), vget_high_s32(vacc0xCD));
    const int32x2_t vsum0xEF = vpadd_s32(vget_low_s32(vacc0xEF), vget_high_s32(vacc0xEF));
    int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF);
#endif

    // rndnu requantization: saturating pre-shift, doubling high-half
    // multiply, then rounding post-shift.
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
    vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);

    // Narrow int32 -> int16 (saturating), add the output zero point, then
    // narrow int16 -> int8 (saturating).
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
    int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
    int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
    int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
    // Clamp to the requested output range.
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);

    if (nc >= 16) {
      // Full 16-channel store; rewind the indirection buffer for the next
      // channel group.
      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 16;
    } else {
      // Partial tail: store 8/4/2/1 lanes according to the bits of nc.
      int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
      if (nc & 8) {
        vst1_s8(c0, vout0x01234567); c0 += 8;
        vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
      }
      if (nc & 4) {
        vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
      }
      if (nc & 2) {
        vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
      }
      if (nc & 1) {
        vst1_lane_s8(c0, vout0x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
| 12,558
| 48.444882
| 128
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-1x16c4-minmax-rndnu-neon-mull-ld1r.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/c4-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4__neon_mull_ld1r(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 4 * sizeof(int8_t));
int8_t* c0 = c;
do {
int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x89 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0xAB = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0xCD = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0xEF = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const int32x2_t va00 = vld1_dup_s32((const void*)a0);
const int32x2_t va01 = vld1_dup_s32((const void*)(a0 + 4)); a0 += 8;
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(va00);
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int16x8_t vprod0x89c0 = vmull_s8(vb89c0, va0c0);
vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
const int16x8_t vprod0xABc0 = vmull_s8(vbABc0, va0c0);
vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
const int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0, va0c0);
vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
const int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0, va0c0);
vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
const int8x8_t va0c1 = vreinterpret_s8_s32(va01);
const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
const int16x8_t vprod0x89c1 = vmull_s8(vb89c1, va0c1);
vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
const int16x8_t vprod0xABc1 = vmull_s8(vbABc1, va0c1);
vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
const int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1, va0c1);
vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
const int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1, va0c1);
vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int16x8_t vprod0x89c0 = vmull_s8(vb89c0, va0c0);
vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
const int16x8_t vprod0xABc0 = vmull_s8(vbABc0, va0c0);
vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
const int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0, va0c0);
vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
const int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0, va0c0);
vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
}
p -= 1 * sizeof(void*);
} while (p != 0);
#if XNN_ARCH_ARM64
int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
int32x4_t vacc0x89AB = vpaddq_s32(vacc0x89, vacc0xAB);
int32x4_t vacc0xCDEF = vpaddq_s32(vacc0xCD, vacc0xEF);
#else
const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
const int32x2_t vsum0x89 = vpadd_s32(vget_low_s32(vacc0x89), vget_high_s32(vacc0x89));
const int32x2_t vsum0xAB = vpadd_s32(vget_low_s32(vacc0xAB), vget_high_s32(vacc0xAB));
int32x4_t vacc0x89AB = vcombine_s32(vsum0x89, vsum0xAB);
const int32x2_t vsum0xCD = vpadd_s32(vget_low_s32(vacc0xCD), vget_high_s32(vacc0xCD));
const int32x2_t vsum0xEF = vpadd_s32(vget_low_s32(vacc0xEF), vget_high_s32(vacc0xEF));
int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF);
#endif
const int32x4_t vright_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->rndnu_neon.output_min);
vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->rndnu_neon.output_max);
vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
if (nc >= 16) {
vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 16;
} else {
int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
if (nc & 8) {
vst1_s8(c0, vout0x01234567); c0 += 8;
vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
}
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 12,571
| 48.301961
| 128
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-1x16c4-minmax-rndnu-neon-mull-ld2r.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/c4-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
// qs8 IGEMM microkernel: MR=1 row x NR=16 columns, reducing KC in groups of
// 4 int8 elements ("c4"), using 8-bit NEON multiplies (vmull_s8) and the
// rndnu requantization scheme.  "ld2r" variant: activations are loaded with
// vld2_dup_s32, which splits 8 bytes into two duplicated 32-bit lane pairs.
void xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4__neon_mull_ld2r(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (1 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // Round the reduction dimension up to a whole group of 4 int8 elements,
  // matching the packed-weight layout this kernel consumes.
  kc = round_up_po2(kc, 4 * sizeof(int8_t));
  int8_t* c0 = c;
  do {
    // Load the 16 int32 biases, two at a time.  vmovl_u32 widens two u32
    // into two u64, so after the reinterpret each vacc0xNM holds the two
    // bias values in its even s32 lanes with zero in the odd lanes; the
    // pairwise adds after the K loop fold the lane pairs back together.
    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
    int32x4_t vacc0x89 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
    int32x4_t vacc0xAB = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
    int32x4_t vacc0xCD = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
    int32x4_t vacc0xEF = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));

    // Indirection loop: one activation-row pointer per iteration of `ks`.
    size_t p = ks;
    do {
      const int8_t* restrict a0 = a[0];
      // Pointers equal to `zero` are the padding row and must not be offset.
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      a += 1;

      // Main K loop: consume 8 input bytes (two c4 groups) per iteration.
      size_t k = kc;
      while (k >= 8 * sizeof(int8_t)) {
        // Two duplicated 4-byte activation groups: val[0] = bytes 0..3
        // replicated, val[1] = bytes 4..7 replicated.
        const int32x2x2_t va0 = vld2_dup_s32((const void*)a0); a0 += 8;

        // 16 weight vectors: 8 column pairs for c4 group 0, then 8 for group 1.
        const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

        // c4 group 0: widening multiply then pairwise-accumulate into int32.
        const int8x8_t va0c0 = vreinterpret_s8_s32(va0.val[0]);
        const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
        const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
        const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
        const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
        const int16x8_t vprod0x89c0 = vmull_s8(vb89c0, va0c0);
        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
        const int16x8_t vprod0xABc0 = vmull_s8(vbABc0, va0c0);
        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
        const int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0, va0c0);
        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
        const int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0, va0c0);
        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
        // c4 group 1: same pattern with the second duplicated activation group.
        const int8x8_t va0c1 = vreinterpret_s8_s32(va0.val[1]);
        const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
        const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
        const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
        const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
        const int16x8_t vprod0x89c1 = vmull_s8(vb89c1, va0c1);
        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
        const int16x8_t vprod0xABc1 = vmull_s8(vbABc1, va0c1);
        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
        const int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1, va0c1);
        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
        const int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1, va0c1);
        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);

        k -= 8 * sizeof(int8_t);
      }
      // Remainder: at most one c4 group left (kc was rounded to 4 bytes).
      // The 8-byte load may read past `k` bytes; covered by XNN_OOB_READS.
      if XNN_UNLIKELY(k != 0) {
        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);

        const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

        // Broadcast the first 4 activation bytes to both halves of the vector.
        const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
        const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
        const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
        const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
        const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
        const int16x8_t vprod0x89c0 = vmull_s8(vb89c0, va0c0);
        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
        const int16x8_t vprod0xABc0 = vmull_s8(vbABc0, va0c0);
        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
        const int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0, va0c0);
        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
        const int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0, va0c0);
        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
      }
      p -= 1 * sizeof(void*);
    } while (p != 0);

    // Horizontal reduction: fold each lane-pair accumulator into one int32
    // per output column (AArch64 has vpaddq_s32; AArch32 pairs 64-bit halves).
#if XNN_ARCH_ARM64
    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
    int32x4_t vacc0x89AB = vpaddq_s32(vacc0x89, vacc0xAB);
    int32x4_t vacc0xCDEF = vpaddq_s32(vacc0xCD, vacc0xEF);
#else
    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
    const int32x2_t vsum0x89 = vpadd_s32(vget_low_s32(vacc0x89), vget_high_s32(vacc0x89));
    const int32x2_t vsum0xAB = vpadd_s32(vget_low_s32(vacc0xAB), vget_high_s32(vacc0xAB));
    int32x4_t vacc0x89AB = vcombine_s32(vsum0x89, vsum0xAB);
    const int32x2_t vsum0xCD = vpadd_s32(vget_low_s32(vacc0xCD), vget_high_s32(vacc0xCD));
    const int32x2_t vsum0xEF = vpadd_s32(vget_low_s32(vacc0xEF), vget_high_s32(vacc0xEF));
    int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF);
#endif

    // rndnu requantization: saturating pre-shift, doubling-multiply high
    // half, then rounding post-shift.
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
    vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);

    // Narrow int32 -> int16 -> int8 with saturation, adding the output
    // zero point at 16 bits.
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
    int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);

    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
    int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
    int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);

    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif

    // Clamp to the requested output range.
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);

    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);

    if (nc >= 16) {
      // Full 16-column tile: store and rewind the indirection buffer.
      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 16;
    } else {
      // Partial tile: store 8/4/2/1 elements, shifting the vector between stores.
      int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
      if (nc & 8) {
        vst1_s8(c0, vout0x01234567); c0 += 8;
        vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
      }
      if (nc & 4) {
        vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
      }
      if (nc & 2) {
        vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
      }
      if (nc & 1) {
        vst1_lane_s8(c0, vout0x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
| 12,516
| 48.279528
| 128
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-1x16c4-minmax-rndnu-neondot.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/c4-neondot.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
// qs8 IGEMM microkernel: MR=1 row x NR=16 columns, reducing KC in groups of
// 4 int8 elements ("c4") with SDOT (vdotq_lane_s32), requantized with the
// rndnu scheme.
//
// Fix: the rndnu pre-shift now uses the saturating vqshlq_s32, matching the
// sibling rndnu kernels in this file (the mull/mlal variants all use
// vqshlq_s32).  The previous vshlq_s32 is only equivalent because
// right_pre_shift is negative (a right shift never saturates); vqshlq_s32 is
// the form the rndnu requantization contract specifies.
void xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4__neondot(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (1 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // Round the reduction dimension up to a whole group of 4 int8 elements.
  kc = round_up_po2(kc, 4 * sizeof(int8_t));
  int8_t* c0 = c;
  do {
    // Load the 16 int32 biases for this column tile.
    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const int32_t*) w + 4;
    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const int32_t*) w + 4;

    // Indirection loop: one activation-row pointer per iteration of `ks`.
    size_t p = ks;
    do {
      const int8_t* restrict a0 = a[0];
      // Pointers equal to `zero` are the padding row and must not be offset.
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      a += 1;

      // Inner accumulation loop along the 16 columns.
      size_t k = kc;
      // 2x partial unrolled loop to load 8 bytes at a time.
      while (k >= 8 * sizeof(int8_t)) {
        // Load a 1x8 block of activations.
        const int8x8_t va0x01234567 = vld1_s8(a0); a0 += 8;

        // Load a 8x16 block of weights.
        const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
        const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
        const int8x16_t vb0123x89AB = vld1q_s8(w); w = (const int8_t*) w + 16;
        const int8x16_t vb0123xCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
        const int8x16_t vb4567x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
        const int8x16_t vb4567x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
        const int8x16_t vb4567x89AB = vld1q_s8(w); w = (const int8_t*) w + 16;
        const int8x16_t vb4567xCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;

        // Multiply-accumulate: 1x8 * 8x16 --> 1x16.
        vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0);
        vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0);
        vacc0x89AB = vdotq_lane_s32(vacc0x89AB, vb0123x89AB, va0x01234567, 0);
        vacc0xCDEF = vdotq_lane_s32(vacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
        vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb4567x0123, va0x01234567, 1);
        vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb4567x4567, va0x01234567, 1);
        vacc0x89AB = vdotq_lane_s32(vacc0x89AB, vb4567x89AB, va0x01234567, 1);
        vacc0xCDEF = vdotq_lane_s32(vacc0xCDEF, vb4567xCDEF, va0x01234567, 1);

        k -= 8 * sizeof(int8_t);
      }
      // Handle up to 4 final positions of `k`
      if XNN_UNLIKELY(k != 0) {
        // Load a 1x4 block of activations.  The 8-byte load may read past
        // `k` bytes; covered by XNN_OOB_READS.
        const int8x8_t va0x01234567 = vld1_s8(a0);

        // Load a 4x16 block of weights.
        const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
        const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
        const int8x16_t vb0123x89AB = vld1q_s8(w); w = (const int8_t*) w + 16;
        const int8x16_t vb0123xCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;

        // Multiply-accumulate: 1x4 * 4x16 --> 1x16.
        vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0);
        vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0);
        vacc0x89AB = vdotq_lane_s32(vacc0x89AB, vb0123x89AB, va0x01234567, 0);
        vacc0xCDEF = vdotq_lane_s32(vacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
      }
      p -= 1 * sizeof(void*);
    } while (p != 0);

    // rndnu requantization: saturating pre-shift, doubling-multiply high
    // half, then rounding post-shift.
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    // Saturating shift (vqshlq_s32) for consistency with the other rndnu
    // kernels; right_pre_shift is negative, so this is a right shift.
    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
    vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);

    // Narrow int32 -> int16 -> int8 with saturation, adding the output
    // zero point at 16 bits.
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);

    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);

    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif

    // Clamp to the requested output range.
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);

    if (nc >= 16) {
      // Full 16-column tile: store and rewind the indirection buffer.
      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 16;
    } else {
      // Partial tile: store 8/4/2/1 elements, shifting the vector between stores.
      int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
      if (nc & 8) {
        vst1_s8(c0, vout0x01234567); c0 += 8;
        vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
      }
      if (nc & 4) {
        vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
      }
      if (nc & 2) {
        vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
      }
      if (nc & 1) {
        vst1_lane_s8(c0, vout0x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
| 7,205
| 39.711864
| 130
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-1x16c4s2-minmax-rndnu-neon-mlal.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/c4-neon-mull-shuffle.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
int8_t* c0 = c;
kc = round_up_po2(kc, 8 * sizeof(int8_t));
do {
int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
int32x4_t vacc0x89 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
int32x4_t vacc0xAB = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
int32x4_t vacc0xCD = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
int32x4_t vacc0xEF = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
while (k >= 16 * sizeof(int8_t)) {
int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
const int8x8_t vb01c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb23c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb45c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb67c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb89c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vbABc0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vbCDc0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vbEFc0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb01c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb23c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb45c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb67c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb89c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vbABc1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vbCDc1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
const int8x8_t vb01c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
const int8x8_t vb23c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
const int8x8_t vb45c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
const int8x8_t vb67c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
int16x8_t vprod0x89c0 = vmull_s8(vb89c0x0, va0x0);
const int8x8_t vb89c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x89c0 = vmlal_s8(vprod0x89c0, vb89c0x1, va0x1);
vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
int16x8_t vprod0xABc0 = vmull_s8(vbABc0x0, va0x0);
const int8x8_t vbABc0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0xABc0 = vmlal_s8(vprod0xABc0, vbABc0x1, va0x1);
vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0x0, va0x0);
const int8x8_t vbCDc0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0xCDc0 = vmlal_s8(vprod0xCDc0, vbCDc0x1, va0x1);
vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0x0, va0x0);
const int8x8_t vbEFc0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0xEFc0 = vmlal_s8(vprod0xEFc0, vbEFc0x1, va0x1);
vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
va0x0 = vext_s8(va0x0, va0x0, 4);
va0x1 = vext_s8(va0x1, va0x1, 4);
int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
const int8x8_t vb01c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
const int8x8_t vb23c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
const int8x8_t vb45c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
const int8x8_t vb67c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
int16x8_t vprod0x89c1 = vmull_s8(vb89c1x0, va0x0);
const int8x8_t vb89c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x89c1 = vmlal_s8(vprod0x89c1, vb89c1x1, va0x1);
vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
int16x8_t vprod0xABc1 = vmull_s8(vbABc1x0, va0x0);
const int8x8_t vbABc1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0xABc1 = vmlal_s8(vprod0xABc1, vbABc1x1, va0x1);
vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1x0, va0x0);
const int8x8_t vbCDc1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0xCDc1 = vmlal_s8(vprod0xCDc1, vbCDc1x1, va0x1);
vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1x0, va0x0);
const int8x8_t vbEFc1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0xEFc1 = vmlal_s8(vprod0xEFc1, vbEFc1x1, va0x1);
vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
k -= 16 * sizeof(int8_t);
}
if (k != 0) {
int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
const int8x8_t vb01c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb23c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb45c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb67c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb89c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vbABc0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vbCDc0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vbEFc0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb01c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb23c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb45c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb67c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb89c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vbABc1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vbCDc1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
int16x8_t vprod0x89c0 = vmull_s8(vb89c0x0, va0x0);
vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
int16x8_t vprod0xABc0 = vmull_s8(vbABc0x0, va0x0);
vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0x0, va0x0);
vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0x0, va0x0);
vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
va0x0 = vext_s8(va0x0, va0x0, 4);
int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
int16x8_t vprod0x89c1 = vmull_s8(vb89c1x0, va0x0);
vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
int16x8_t vprod0xABc1 = vmull_s8(vbABc1x0, va0x0);
vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1x0, va0x0);
vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1x0, va0x0);
vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
}
p -= 1 * sizeof(void*);
} while (p != 0);
#if XNN_ARCH_ARM64
int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
int32x4_t vacc0x89AB = vpaddq_s32(vacc0x89, vacc0xAB);
int32x4_t vacc0xCDEF = vpaddq_s32(vacc0xCD, vacc0xEF);
#else
const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
const int32x2_t vsum0x89 = vpadd_s32(vget_low_s32(vacc0x89), vget_high_s32(vacc0x89));
const int32x2_t vsum0xAB = vpadd_s32(vget_low_s32(vacc0xAB), vget_high_s32(vacc0xAB));
int32x4_t vacc0x89AB = vcombine_s32(vsum0x89, vsum0xAB);
const int32x2_t vsum0xCD = vpadd_s32(vget_low_s32(vacc0xCD), vget_high_s32(vacc0xCD));
const int32x2_t vsum0xEF = vpadd_s32(vget_low_s32(vacc0xEF), vget_high_s32(vacc0xEF));
int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF);
#endif
const int32x4_t vright_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->rndnu_neon.output_min);
vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->rndnu_neon.output_max);
vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
if (nc >= 16) {
vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 16;
} else {
int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
if (nc & 8) {
vst1_s8(c0, vout0x01234567); c0 += 8;
vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
}
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 15,070
| 47.931818
| 107
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-1x16c4s2-minmax-rndnu-neon-mull.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/c4-neon-mull-shuffle.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
// QS8 indirect-GEMM microkernel: 1 row of A x up-to-16 output channels.
// "c4s2" layout: each 8-byte slice of A is used twice -- once against the
// "c0" weight set and once, rotated by 4 bytes (vext_s8 below), against the
// "c1" weight set.  Products are formed with vmull_s8 and folded into 32-bit
// accumulators via vpadalq_s16; results are requantized with the "rndnu"
// scheme (saturating pre-shift, doubling-high multiply, rounding post-shift)
// and clamped to [output_min, output_max].
//
//   mr/nc/kc  - tile rows (must be 1 here) / remaining columns / K bytes
//   ks        - byte size of the per-output-pixel indirection-pointer block
//   a         - indirection buffer: pointers to A rows, consumed ks bytes
//               per output pixel, rewound by ks after each column tile
//   w         - packed weights: 16 int32 biases followed by interleaved
//               8-byte int8 weight slices (c0 set then c1 set per K step)
//   zero      - sentinel row pointer; rows equal to it skip a_offset
void xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
int8_t* c0 = c;
// Inner loop consumes A in 8-byte slices, so round KC up to a multiple of 8.
kc = round_up_po2(kc, 8 * sizeof(int8_t));
do {
// Initialize accumulators from the 16 packed int32 biases.  Each vld1_u32
// reads two bias words; vmovl_u32 zero-extends them to 64-bit lanes, so
// each int32x4_t starts as {b_even, 0, b_odd, 0}.  The pairwise reduction
// after the K loop folds the four lanes back down onto the two biases.
int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
int32x4_t vacc0x89 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
int32x4_t vacc0xAB = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
int32x4_t vacc0xCD = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
int32x4_t vacc0xEF = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
// Walk the indirection buffer: one A-row pointer per iteration.
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
// Real rows get a_offset applied; the shared `zero` padding row does not.
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
do {
// One K step: 8 bytes of A against 16 (c0) + 16 (c1) weight slices.
int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
const int8x8_t vb01c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb23c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb45c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb67c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb89c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vbABc0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vbCDc0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vbEFc0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb01c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb23c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb45c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb67c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb89c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vbABc1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vbCDc1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
// c0 pass: widening multiply, then pairwise-accumulate into int32 lanes.
int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
int16x8_t vprod0x89c0 = vmull_s8(vb89c0x0, va0x0);
vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
int16x8_t vprod0xABc0 = vmull_s8(vbABc0x0, va0x0);
vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0x0, va0x0);
vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0x0, va0x0);
vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
// Rotate the A slice by 4 bytes ("shuffle of 2") for the c1 pass.
va0x0 = vext_s8(va0x0, va0x0, 4);
int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
int16x8_t vprod0x89c1 = vmull_s8(vb89c1x0, va0x0);
vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
int16x8_t vprod0xABc1 = vmull_s8(vbABc1x0, va0x0);
vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1x0, va0x0);
vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1x0, va0x0);
vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
k -= 8 * sizeof(int8_t);
} while (k != 0);
p -= 1 * sizeof(void*);
} while (p != 0);
// Pairwise-reduce each 2-channel accumulator pair into 4-channel vectors
// ({bias + sum} per lane).  AArch64 has a full-width vpaddq; AArch32 builds
// the same result from 64-bit halves.
#if XNN_ARCH_ARM64
int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
int32x4_t vacc0x89AB = vpaddq_s32(vacc0x89, vacc0xAB);
int32x4_t vacc0xCDEF = vpaddq_s32(vacc0xCD, vacc0xEF);
#else
const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
const int32x2_t vsum0x89 = vpadd_s32(vget_low_s32(vacc0x89), vget_high_s32(vacc0x89));
const int32x2_t vsum0xAB = vpadd_s32(vget_low_s32(vacc0xAB), vget_high_s32(vacc0xAB));
int32x4_t vacc0x89AB = vcombine_s32(vsum0x89, vsum0xAB);
const int32x2_t vsum0xCD = vpadd_s32(vget_low_s32(vacc0xCD), vget_high_s32(vacc0xCD));
const int32x2_t vsum0xEF = vpadd_s32(vget_low_s32(vacc0xEF), vget_high_s32(vacc0xEF));
int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF);
#endif
// rndnu requantization: saturating left pre-shift, saturating doubling
// high multiply, then rounding (arithmetic) post-shift.
const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
// Saturating-narrow to int16, add the output zero point, then narrow to int8.
const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
// Clamp to the requested output range.
const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
if (nc >= 16) {
// Full 16-channel store; rewind the indirection pointer for the next tile.
vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 16;
} else {
// Remainder: store 8/4/2/1 channels, rotating the vector between stores.
int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
if (nc & 8) {
vst1_s8(c0, vout0x01234567); c0 += 8;
vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
}
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 9,694
| 43.269406
| 107
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-1x16c8-minmax-rndnu-neon-mlal.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/c8-neon-mull.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
// QS8 indirect-GEMM microkernel: 1 row of A x up-to-16 output channels,
// "c8" layout (each output channel has its own 8-byte accumulator chain).
// The main K loop is 2x unrolled: it loads 16 A bytes and fuses the second
// 8-byte slice with vmlal_s8 before the pairwise accumulate; an 8-byte MUL
// tail handles the remaining K step.  Results are requantized with the
// "rndnu" scheme and clamped to [output_min, output_max].
//
//   mr/nc/kc  - tile rows (must be 1 here) / remaining columns / K bytes
//   ks        - byte size of the per-output-pixel indirection-pointer block
//   a         - indirection buffer of A-row pointers, rewound by ks per tile
//   w         - packed weights: 16 int32 biases, then 8-byte int8 slices,
//               one per output channel per 8-byte K step
//   zero      - sentinel row pointer; rows equal to it skip a_offset
void xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c8__neon_mlal(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
// The loops consume A in 8-byte slices, so round KC up to a multiple of 8.
kc = round_up_po2(kc, 8 * sizeof(int8_t))
;
int8_t* c0 = c;
do {
// One accumulator per output channel, seeded with its int32 bias in
// lane 0 (remaining lanes zero); lanes are summed in the reduction below.
int32x4_t vacc0x0 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x1 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x2 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x3 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x4 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x5 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x6 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x7 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x8 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x9 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x10 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x11 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x12 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x13 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x14 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x15 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
// Walk the indirection buffer: one A-row pointer per iteration.
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
// Real rows get a_offset applied; the shared `zero` padding row does not.
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
// 2x partial unrolled loop to load 16 bytes at a time using MLA.
while (k >= 16 * sizeof(int8_t)) {
const int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
const int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
// First 16 weight slices pair with va0x0 (one slice per channel).
const int8x8_t vb0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
const int8x8_t vb1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
const int8x8_t vb2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
const int8x8_t vb3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
const int8x8_t vb4x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
const int8x8_t vb5x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
const int8x8_t vb6x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
const int8x8_t vb7x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
const int8x8_t vb8x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
const int8x8_t vb9x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
const int8x8_t vb10x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
const int8x8_t vb11x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
const int8x8_t vb12x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
const int8x8_t vb13x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
const int8x8_t vb14x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
const int8x8_t vb15x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
// Per channel: MUL with the first slice, MLA the second slice (va0x1),
// then pairwise-accumulate the int16 products into the int32 lanes.
const int8x8_t vb0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
int16x8_t vprod0x0 = vmull_s8(vb0x0, va0x0);
vprod0x0 = vmlal_s8(vprod0x0, vb0x1, va0x1);
vacc0x0 = vpadalq_s16(vacc0x0, vprod0x0);
const int8x8_t vb1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
int16x8_t vprod0x1 = vmull_s8(vb1x0, va0x0);
vprod0x1 = vmlal_s8(vprod0x1, vb1x1, va0x1);
vacc0x1 = vpadalq_s16(vacc0x1, vprod0x1);
const int8x8_t vb2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
int16x8_t vprod0x2 = vmull_s8(vb2x0, va0x0);
vprod0x2 = vmlal_s8(vprod0x2, vb2x1, va0x1);
vacc0x2 = vpadalq_s16(vacc0x2, vprod0x2);
const int8x8_t vb3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
int16x8_t vprod0x3 = vmull_s8(vb3x0, va0x0);
vprod0x3 = vmlal_s8(vprod0x3, vb3x1, va0x1);
vacc0x3 = vpadalq_s16(vacc0x3, vprod0x3);
const int8x8_t vb4x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
int16x8_t vprod0x4 = vmull_s8(vb4x0, va0x0);
vprod0x4 = vmlal_s8(vprod0x4, vb4x1, va0x1);
vacc0x4 = vpadalq_s16(vacc0x4, vprod0x4);
const int8x8_t vb5x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
int16x8_t vprod0x5 = vmull_s8(vb5x0, va0x0);
vprod0x5 = vmlal_s8(vprod0x5, vb5x1, va0x1);
vacc0x5 = vpadalq_s16(vacc0x5, vprod0x5);
const int8x8_t vb6x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
int16x8_t vprod0x6 = vmull_s8(vb6x0, va0x0);
vprod0x6 = vmlal_s8(vprod0x6, vb6x1, va0x1);
vacc0x6 = vpadalq_s16(vacc0x6, vprod0x6);
const int8x8_t vb7x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
int16x8_t vprod0x7 = vmull_s8(vb7x0, va0x0);
vprod0x7 = vmlal_s8(vprod0x7, vb7x1, va0x1);
vacc0x7 = vpadalq_s16(vacc0x7, vprod0x7);
const int8x8_t vb8x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
int16x8_t vprod0x8 = vmull_s8(vb8x0, va0x0);
vprod0x8 = vmlal_s8(vprod0x8, vb8x1, va0x1);
vacc0x8 = vpadalq_s16(vacc0x8, vprod0x8);
const int8x8_t vb9x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
int16x8_t vprod0x9 = vmull_s8(vb9x0, va0x0);
vprod0x9 = vmlal_s8(vprod0x9, vb9x1, va0x1);
vacc0x9 = vpadalq_s16(vacc0x9, vprod0x9);
const int8x8_t vb10x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
int16x8_t vprod0x10 = vmull_s8(vb10x0, va0x0);
vprod0x10 = vmlal_s8(vprod0x10, vb10x1, va0x1);
vacc0x10 = vpadalq_s16(vacc0x10, vprod0x10);
const int8x8_t vb11x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
int16x8_t vprod0x11 = vmull_s8(vb11x0, va0x0);
vprod0x11 = vmlal_s8(vprod0x11, vb11x1, va0x1);
vacc0x11 = vpadalq_s16(vacc0x11, vprod0x11);
const int8x8_t vb12x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
int16x8_t vprod0x12 = vmull_s8(vb12x0, va0x0);
vprod0x12 = vmlal_s8(vprod0x12, vb12x1, va0x1);
vacc0x12 = vpadalq_s16(vacc0x12, vprod0x12);
const int8x8_t vb13x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
int16x8_t vprod0x13 = vmull_s8(vb13x0, va0x0);
vprod0x13 = vmlal_s8(vprod0x13, vb13x1, va0x1);
vacc0x13 = vpadalq_s16(vacc0x13, vprod0x13);
const int8x8_t vb14x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
int16x8_t vprod0x14 = vmull_s8(vb14x0, va0x0);
vprod0x14 = vmlal_s8(vprod0x14, vb14x1, va0x1);
vacc0x14 = vpadalq_s16(vacc0x14, vprod0x14);
const int8x8_t vb15x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
int16x8_t vprod0x15 = vmull_s8(vb15x0, va0x0);
vprod0x15 = vmlal_s8(vprod0x15, vb15x1, va0x1);
vacc0x15 = vpadalq_s16(vacc0x15, vprod0x15);
k -= 16 * sizeof(int8_t);
}
// Handle 8 bytes at a time using MUL.
if (k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 += 8;
const int8x8_t vb0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x0 = vmull_s8(vb0, va0);
vacc0x0 = vpadalq_s16(vacc0x0, vprod0x0);
const int8x8_t vb1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x1 = vmull_s8(vb1, va0);
vacc0x1 = vpadalq_s16(vacc0x1, vprod0x1);
const int8x8_t vb2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x2 = vmull_s8(vb2, va0);
vacc0x2 = vpadalq_s16(vacc0x2, vprod0x2);
const int8x8_t vb3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x3 = vmull_s8(vb3, va0);
vacc0x3 = vpadalq_s16(vacc0x3, vprod0x3);
const int8x8_t vb4 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x4 = vmull_s8(vb4, va0);
vacc0x4 = vpadalq_s16(vacc0x4, vprod0x4);
const int8x8_t vb5 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x5 = vmull_s8(vb5, va0);
vacc0x5 = vpadalq_s16(vacc0x5, vprod0x5);
const int8x8_t vb6 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x6 = vmull_s8(vb6, va0);
vacc0x6 = vpadalq_s16(vacc0x6, vprod0x6);
const int8x8_t vb7 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x7 = vmull_s8(vb7, va0);
vacc0x7 = vpadalq_s16(vacc0x7, vprod0x7);
const int8x8_t vb8 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x8 = vmull_s8(vb8, va0);
vacc0x8 = vpadalq_s16(vacc0x8, vprod0x8);
const int8x8_t vb9 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x9 = vmull_s8(vb9, va0);
vacc0x9 = vpadalq_s16(vacc0x9, vprod0x9);
const int8x8_t vb10 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x10 = vmull_s8(vb10, va0);
vacc0x10 = vpadalq_s16(vacc0x10, vprod0x10);
const int8x8_t vb11 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x11 = vmull_s8(vb11, va0);
vacc0x11 = vpadalq_s16(vacc0x11, vprod0x11);
const int8x8_t vb12 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x12 = vmull_s8(vb12, va0);
vacc0x12 = vpadalq_s16(vacc0x12, vprod0x12);
const int8x8_t vb13 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x13 = vmull_s8(vb13, va0);
vacc0x13 = vpadalq_s16(vacc0x13, vprod0x13);
const int8x8_t vb14 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x14 = vmull_s8(vb14, va0);
vacc0x14 = vpadalq_s16(vacc0x14, vprod0x14);
const int8x8_t vb15 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x15 = vmull_s8(vb15, va0);
vacc0x15 = vpadalq_s16(vacc0x15, vprod0x15);
k -= 8 * sizeof(int8_t);
}
p -= 1 * sizeof(void*);
} while (p != 0);
// Reduce each per-channel accumulator (4 lanes) to a scalar and gather
// the 16 results into four int32x4_t vectors.  AArch64 uses full-width
// pairwise adds; AArch32 reduces via 64-bit halves.
#if XNN_ARCH_ARM64
const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1);
const int32x4_t vsum0x23 = vpaddq_s32(vacc0x2, vacc0x3);
const int32x4_t vsum0x45 = vpaddq_s32(vacc0x4, vacc0x5);
const int32x4_t vsum0x67 = vpaddq_s32(vacc0x6, vacc0x7);
const int32x4_t vsum0x89 = vpaddq_s32(vacc0x8, vacc0x9);
const int32x4_t vsum0xAB = vpaddq_s32(vacc0x10, vacc0x11);
const int32x4_t vsum0xCD = vpaddq_s32(vacc0x12, vacc0x13);
const int32x4_t vsum0xEF = vpaddq_s32(vacc0x14, vacc0x15);
int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23);
int32x4_t vacc0x4567 = vpaddq_s32(vsum0x45, vsum0x67);
int32x4_t vacc0x89AB = vpaddq_s32(vsum0x89, vsum0xAB);
int32x4_t vacc0xCDEF = vpaddq_s32(vsum0xCD, vsum0xEF);
#else
const int32x2_t vpsum0x0 = vadd_s32(vget_low_s32(vacc0x0), vget_high_s32(vacc0x0));
const int32x2_t vpsum0x1 = vadd_s32(vget_low_s32(vacc0x1), vget_high_s32(vacc0x1));
const int32x2_t vpsum0x2 = vadd_s32(vget_low_s32(vacc0x2), vget_high_s32(vacc0x2));
const int32x2_t vpsum0x3 = vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3));
const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1);
const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3);
int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23 );
const int32x2_t vpsum0x4 = vadd_s32(vget_low_s32(vacc0x4), vget_high_s32(vacc0x4));
const int32x2_t vpsum0x5 = vadd_s32(vget_low_s32(vacc0x5), vget_high_s32(vacc0x5));
const int32x2_t vpsum0x6 = vadd_s32(vget_low_s32(vacc0x6), vget_high_s32(vacc0x6));
const int32x2_t vpsum0x7 = vadd_s32(vget_low_s32(vacc0x7), vget_high_s32(vacc0x7));
const int32x2_t vsum0x45 = vpadd_s32(vpsum0x4, vpsum0x5);
const int32x2_t vsum0x67 = vpadd_s32(vpsum0x6, vpsum0x7);
int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67 );
const int32x2_t vpsum0x8 = vadd_s32(vget_low_s32(vacc0x8), vget_high_s32(vacc0x8));
const int32x2_t vpsum0x9 = vadd_s32(vget_low_s32(vacc0x9), vget_high_s32(vacc0x9));
const int32x2_t vpsum0xA = vadd_s32(vget_low_s32(vacc0x10), vget_high_s32(vacc0x10));
const int32x2_t vpsum0xB = vadd_s32(vget_low_s32(vacc0x11), vget_high_s32(vacc0x11));
const int32x2_t vsum0x89 = vpadd_s32(vpsum0x8, vpsum0x9);
const int32x2_t vsum0xAB = vpadd_s32(vpsum0xA, vpsum0xB);
int32x4_t vacc0x89AB = vcombine_s32(vsum0x89, vsum0xAB );
const int32x2_t vpsum0xC = vadd_s32(vget_low_s32(vacc0x12), vget_high_s32(vacc0x12));
const int32x2_t vpsum0xD = vadd_s32(vget_low_s32(vacc0x13), vget_high_s32(vacc0x13));
const int32x2_t vpsum0xE = vadd_s32(vget_low_s32(vacc0x14), vget_high_s32(vacc0x14));
const int32x2_t vpsum0xF = vadd_s32(vget_low_s32(vacc0x15), vget_high_s32(vacc0x15));
const int32x2_t vsum0xCD = vpadd_s32(vpsum0xC, vpsum0xD);
const int32x2_t vsum0xEF = vpadd_s32(vpsum0xE, vpsum0xF);
int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF );
#endif
// rndnu requantization: saturating left pre-shift, saturating doubling
// high multiply, then rounding (arithmetic) post-shift.
const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
// Saturating-narrow to int16, add the output zero point, then narrow to int8.
const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
// Clamp to the requested output range.
const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
if (nc >= 16) {
// Full 16-channel store; rewind the indirection pointer for the next tile.
vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 16;
} else {
// Remainder: store 8/4/2/1 channels, rotating the vector between stores.
int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
if (nc & 8) {
vst1_s8(c0, vout0x01234567); c0 += 8;
vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
}
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 18,746
| 53.97654
| 115
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-1x16c8-minmax-rndnu-neon-mull.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/c8-neon-mull.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
// QS8 (signed 8-bit) indirect GEMM microkernel: 1 output row x 16 output
// columns, consuming 8 "k" bytes per step with NEON VMULL/VPADAL, using the
// rndnu (rounding shift) requantization scheme.
//
// a      - indirection buffer of ks row pointers; an entry equal to `zero`
//          selects the zero buffer and is NOT offset by a_offset.
// w      - packed weights: 16 int32 biases followed by int8 weights in
//          groups of 8 bytes per output channel.
// c      - output pointer; cn_stride advances between 16-column tiles.
// NOTE(review): XNN_OOB_READS marks that the kernel may read past buffer
// ends (kc is rounded up to 8 below) — presumably callers pad; verify.
void xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c8__neon_mull(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (1 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  int8_t* c0 = c;
  do {
    // Load the 16 per-channel int32 biases into lane 0 of 16 accumulators;
    // lanes 1-3 start at zero and are folded in by the reduction below.
    int32x4_t vacc0x0 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x1 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x2 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x3 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x4 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x5 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x6 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x7 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x8 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x9 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x10 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x11 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x12 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x13 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x14 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x15 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    size_t p = ks;
    do {
      // Fetch the next indirection entry; only non-zero-buffer rows get the
      // a_offset adjustment.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      a += 1;
      size_t k = kc;
      // Handle 8 bytes at a time using MUL.
      while (k != 0) {
        const int8x8_t va0 = vld1_s8(a0); a0 += 8;
        // For each of the 16 output channels: widening 8x8->16 multiply,
        // then pairwise-accumulate into the 32-bit accumulator.
        const int8x8_t vb0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x0 = vmull_s8(vb0, va0);
        vacc0x0 = vpadalq_s16(vacc0x0, vprod0x0);
        const int8x8_t vb1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x1 = vmull_s8(vb1, va0);
        vacc0x1 = vpadalq_s16(vacc0x1, vprod0x1);
        const int8x8_t vb2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x2 = vmull_s8(vb2, va0);
        vacc0x2 = vpadalq_s16(vacc0x2, vprod0x2);
        const int8x8_t vb3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x3 = vmull_s8(vb3, va0);
        vacc0x3 = vpadalq_s16(vacc0x3, vprod0x3);
        const int8x8_t vb4 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x4 = vmull_s8(vb4, va0);
        vacc0x4 = vpadalq_s16(vacc0x4, vprod0x4);
        const int8x8_t vb5 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x5 = vmull_s8(vb5, va0);
        vacc0x5 = vpadalq_s16(vacc0x5, vprod0x5);
        const int8x8_t vb6 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x6 = vmull_s8(vb6, va0);
        vacc0x6 = vpadalq_s16(vacc0x6, vprod0x6);
        const int8x8_t vb7 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x7 = vmull_s8(vb7, va0);
        vacc0x7 = vpadalq_s16(vacc0x7, vprod0x7);
        const int8x8_t vb8 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x8 = vmull_s8(vb8, va0);
        vacc0x8 = vpadalq_s16(vacc0x8, vprod0x8);
        const int8x8_t vb9 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x9 = vmull_s8(vb9, va0);
        vacc0x9 = vpadalq_s16(vacc0x9, vprod0x9);
        const int8x8_t vb10 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x10 = vmull_s8(vb10, va0);
        vacc0x10 = vpadalq_s16(vacc0x10, vprod0x10);
        const int8x8_t vb11 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x11 = vmull_s8(vb11, va0);
        vacc0x11 = vpadalq_s16(vacc0x11, vprod0x11);
        const int8x8_t vb12 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x12 = vmull_s8(vb12, va0);
        vacc0x12 = vpadalq_s16(vacc0x12, vprod0x12);
        const int8x8_t vb13 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x13 = vmull_s8(vb13, va0);
        vacc0x13 = vpadalq_s16(vacc0x13, vprod0x13);
        const int8x8_t vb14 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x14 = vmull_s8(vb14, va0);
        vacc0x14 = vpadalq_s16(vacc0x14, vprod0x14);
        const int8x8_t vb15 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x15 = vmull_s8(vb15, va0);
        vacc0x15 = vpadalq_s16(vacc0x15, vprod0x15);
        k -= 8 * sizeof(int8_t);
      }
      p -= 1 * sizeof(void*);
    } while (p != 0);
    // Horizontally reduce each 4-lane per-channel accumulator to one lane and
    // regroup the 16 scalars into four int32x4_t vectors.
#if XNN_ARCH_ARM64
    const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1);
    const int32x4_t vsum0x23 = vpaddq_s32(vacc0x2, vacc0x3);
    const int32x4_t vsum0x45 = vpaddq_s32(vacc0x4, vacc0x5);
    const int32x4_t vsum0x67 = vpaddq_s32(vacc0x6, vacc0x7);
    const int32x4_t vsum0x89 = vpaddq_s32(vacc0x8, vacc0x9);
    const int32x4_t vsum0xAB = vpaddq_s32(vacc0x10, vacc0x11);
    const int32x4_t vsum0xCD = vpaddq_s32(vacc0x12, vacc0x13);
    const int32x4_t vsum0xEF = vpaddq_s32(vacc0x14, vacc0x15);
    int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23);
    int32x4_t vacc0x4567 = vpaddq_s32(vsum0x45, vsum0x67);
    int32x4_t vacc0x89AB = vpaddq_s32(vsum0x89, vsum0xAB);
    int32x4_t vacc0xCDEF = vpaddq_s32(vsum0xCD, vsum0xEF);
#else
    // AArch32 has no vpaddq; emulate with half-vector add + pairwise add.
    const int32x2_t vpsum0x0 = vadd_s32(vget_low_s32(vacc0x0), vget_high_s32(vacc0x0));
    const int32x2_t vpsum0x1 = vadd_s32(vget_low_s32(vacc0x1), vget_high_s32(vacc0x1));
    const int32x2_t vpsum0x2 = vadd_s32(vget_low_s32(vacc0x2), vget_high_s32(vacc0x2));
    const int32x2_t vpsum0x3 = vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3));
    const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1);
    const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3);
    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23 );
    const int32x2_t vpsum0x4 = vadd_s32(vget_low_s32(vacc0x4), vget_high_s32(vacc0x4));
    const int32x2_t vpsum0x5 = vadd_s32(vget_low_s32(vacc0x5), vget_high_s32(vacc0x5));
    const int32x2_t vpsum0x6 = vadd_s32(vget_low_s32(vacc0x6), vget_high_s32(vacc0x6));
    const int32x2_t vpsum0x7 = vadd_s32(vget_low_s32(vacc0x7), vget_high_s32(vacc0x7));
    const int32x2_t vsum0x45 = vpadd_s32(vpsum0x4, vpsum0x5);
    const int32x2_t vsum0x67 = vpadd_s32(vpsum0x6, vpsum0x7);
    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67 );
    const int32x2_t vpsum0x8 = vadd_s32(vget_low_s32(vacc0x8), vget_high_s32(vacc0x8));
    const int32x2_t vpsum0x9 = vadd_s32(vget_low_s32(vacc0x9), vget_high_s32(vacc0x9));
    const int32x2_t vpsum0xA = vadd_s32(vget_low_s32(vacc0x10), vget_high_s32(vacc0x10));
    const int32x2_t vpsum0xB = vadd_s32(vget_low_s32(vacc0x11), vget_high_s32(vacc0x11));
    const int32x2_t vsum0x89 = vpadd_s32(vpsum0x8, vpsum0x9);
    const int32x2_t vsum0xAB = vpadd_s32(vpsum0xA, vpsum0xB);
    int32x4_t vacc0x89AB = vcombine_s32(vsum0x89, vsum0xAB );
    const int32x2_t vpsum0xC = vadd_s32(vget_low_s32(vacc0x12), vget_high_s32(vacc0x12));
    const int32x2_t vpsum0xD = vadd_s32(vget_low_s32(vacc0x13), vget_high_s32(vacc0x13));
    const int32x2_t vpsum0xE = vadd_s32(vget_low_s32(vacc0x14), vget_high_s32(vacc0x14));
    const int32x2_t vpsum0xF = vadd_s32(vget_low_s32(vacc0x15), vget_high_s32(vacc0x15));
    const int32x2_t vsum0xCD = vpadd_s32(vpsum0xC, vpsum0xD);
    const int32x2_t vsum0xEF = vpadd_s32(vpsum0xE, vpsum0xF);
    int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF );
#endif
    // rndnu requantization: saturating pre-shift, saturating doubling-high
    // multiply, then rounding post-shift.
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
    vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);
    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
    // Narrow to 16 bits, add the output zero point, then narrow to 8 bits
    // with saturation.
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
    int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
    int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
    int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
    // Clamp to the requested output range.
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
    if (nc >= 16) {
      // Full 16-column store; rewind the indirection pointer for the next tile.
      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      a = (const int8_t**restrict) ((uintptr_t) a - ks);
      nc -= 16;
    } else {
      // Tail: store 8/4/2/1 remaining columns, shifting stored lanes out.
      int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
      if (nc & 8) {
        vst1_s8(c0, vout0x01234567); c0 += 8;
        vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
      }
      if (nc & 4) {
        vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
      }
      if (nc & 2) {
        vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
      }
      if (nc & 1) {
        vst1_lane_s8(c0, vout0x01234567, 0);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 12,697
| 49.388889
| 115
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-1x1c4-minmax-fp32-armsimd32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/c4-armsimd32.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_acle.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
#include <xnnpack/unaligned.h>
// QS8 indirect GEMM microkernel: 1 output row x 1 output column, consuming
// 4 "k" bytes per step with ARMv6 SIMD32 (ACLE) intrinsics, using the fp32
// "magic bias" requantization scheme.
void xnn_qs8_igemm_minmax_fp32_ukernel_1x1c4__armsimd32(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (1 * sizeof(void*)) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  kc = round_up_po2(kc, 4 * sizeof(int8_t));
  int8_t* c0 = c;
  const float vscale = params->fp32_armsimd32.scale;
  const float vmagic_bias = params->fp32_armsimd32.magic_bias;
  do {
    // Accumulator starts from the packed int32 bias.
    int32_t vacc0x0 = ((const int32_t*) w)[0];
    w = (const void*) ((const int32_t*) w + 1);
    size_t p = ks;
    do {
      // Entries equal to `zero` select the zero buffer and are not offset.
      const int8_t* restrict a0 = a[0];
      assert(a0 != NULL);
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      a += 1;
      size_t k = kc;
      do {
        // Split 4 packed int8 into even/odd int16 pairs (__sxtb16), then
        // dual 16x16+acc multiply-accumulate (__smlad).
        const int8x4_t va0 = (int8x4_t) unaligned_load_s32(a0); a0 += 4;
        const int16x2_t va0c02 = __sxtb16(va0);
        const int16x2_t va0c13 = __sxtb16(__ror(va0, 8));
        const int8x4_t vb0 = *((const int8x4_t*) w); w = (const int8_t*) w + 4;
        const int16x2_t vb0c02 = __sxtb16(vb0);
        vacc0x0 = __smlad(va0c02, vb0c02, vacc0x0);
        const int16x2_t vb0c13 = __sxtb16(__ror(vb0, 8));
        vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);
        k -= 4 * sizeof(int8_t);
      } while (k != 0);
      p -= 1 * sizeof(void*);
    } while (p != 0);
    // Requantize: scale in float, add the magic bias so the integer result
    // appears in the low mantissa bits, then reinterpret and adjust.
    float vfpacc0x0 = (float) vacc0x0;
    vfpacc0x0 *= vscale;
    vfpacc0x0 += vmagic_bias;
    int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0);
    const int32_t vmagic_bias_less_zero_point = params->fp32_armsimd32.magic_bias_less_zero_point;
    vout0x0 = __qsub(vout0x0, vmagic_bias_less_zero_point);
    vout0x0 = __ssat(vout0x0, 8);
    const uint32_t vout0 = (uint32_t) vout0x0;
    uint32_t vout = vout0;
    // Clamp via SIMD compare-select: __ssub8 sets the GE flags that the
    // following __sel reads (its subtraction result is intentionally unused).
    const int8x4_t voutput_min = (int8x4_t) params->fp32_armsimd32.output_min;
    __ssub8((int8x4_t) vout, voutput_min);
    vout = (uint32_t) __sel((uint8x4_t) vout, (uint8x4_t) voutput_min);
    const int8x4_t voutput_max = (int8x4_t) params->fp32_armsimd32.output_max;
    __ssub8((int8x4_t) vout, voutput_max);
    vout = (uint32_t) __sel((uint8x4_t) voutput_max, (uint8x4_t) vout);
    // Single-column store; rewind the indirection pointer for the next tile.
    *c0 = (int8_t) vout;
    c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
    a = (const int8_t**restrict) ((uintptr_t) a - ks);
    nc -= 1;
  } while (nc != 0);
}
| 3,123
| 26.165217
| 98
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-1x2-minmax-fp32-scalar-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
#include <xnnpack/unaligned.h>
// QS8 indirect GEMM microkernel: 1 output row x 2 output columns, plain C
// scalar arithmetic, fp32 "fmagic" requantization (clamp in float, then
// magic-bias rounding via bit reinterpretation).
void xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (1 * sizeof(void*)) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  int8_t* c0 = c;

  do {
    // Seed both accumulators with the packed int32 biases.
    int32_t acc0 = unaligned_indexed_load_s32(w, 0);
    int32_t acc1 = unaligned_indexed_load_s32(w, 1);
    w = (const void*) ((const int32_t*) w + 2);

    size_t p = ks;
    do {
      // An indirection entry equal to `zero` points at the zero buffer and
      // must not be offset by a_offset.
      const int8_t* restrict a0 = a[0];
      assert(a0 != NULL);
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      a += 1;

      // Multiply-accumulate one input byte against both weight columns.
      size_t k = kc;
      do {
        const int32_t va = (int32_t) *a0++;
        const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
        const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
        w = (const void*) ((const int8_t*) w + 2);
        acc0 += va * vb0;
        acc1 += va * vb1;
        k -= sizeof(int8_t);
      } while (k != 0);
      p -= 1 * sizeof(void*);
    } while (p != 0);

    // Requantize: scale, clamp in the float domain, add the magic bias and
    // reinterpret the float bits to recover a rounded integer.
    float f0 = (float) acc0;
    float f1 = (float) acc1;
    const float vscale = params->fp32_scalar_fmagic.scale;
    f0 *= vscale;
    f1 *= vscale;
    const float vmin = params->fp32_scalar_fmagic.output_min_less_zero_point;
    f0 = math_max_f32(f0, vmin);
    f1 = math_max_f32(f1, vmin);
    const float vmax = params->fp32_scalar_fmagic.output_max_less_zero_point;
    f0 = math_min_f32(f0, vmax);
    f1 = math_min_f32(f1, vmax);
    const float vbias = params->fp32_scalar_fmagic.magic_bias;
    f0 += vbias;
    f1 += vbias;
    const int32_t vadjust = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
    int32_t out0 = (int32_t) float_as_uint32(f0) - vadjust;
    int32_t out1 = (int32_t) float_as_uint32(f1) - vadjust;

    if XNN_LIKELY(nc >= 2) {
      // Full 2-column store; rewind the indirection pointer for the next tile.
      c0[0] = (int8_t) out0;
      c0[1] = (int8_t) out1;
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      a = (const int8_t**restrict) ((uintptr_t) a - ks);
      nc -= 2;
    } else {
      // Tail: at most one column left.
      if (nc & 1) {
        c0[0] = (int8_t) out0;
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 3,230
| 27.59292
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-1x2-minmax-fp32-scalar-imagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
#include <xnnpack/unaligned.h>
// QS8 indirect GEMM microkernel: 1 output row x 2 output columns, plain C
// scalar arithmetic, fp32 "imagic" requantization (magic-bias rounding
// first, clamping done in the integer domain).
void xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_imagic(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (1 * sizeof(void*)) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  int8_t* c0 = c;

  do {
    // Seed both accumulators with the packed int32 biases.
    int32_t acc0 = unaligned_indexed_load_s32(w, 0);
    int32_t acc1 = unaligned_indexed_load_s32(w, 1);
    w = (const void*) ((const int32_t*) w + 2);

    size_t p = ks;
    do {
      // An indirection entry equal to `zero` points at the zero buffer and
      // must not be offset by a_offset.
      const int8_t* restrict a0 = a[0];
      assert(a0 != NULL);
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      a += 1;

      // Multiply-accumulate one input byte against both weight columns.
      size_t k = kc;
      do {
        const int32_t va = (int32_t) *a0++;
        const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
        const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
        w = (const void*) ((const int8_t*) w + 2);
        acc0 += va * vb0;
        acc1 += va * vb1;
        k -= sizeof(int8_t);
      } while (k != 0);
      p -= 1 * sizeof(void*);
    } while (p != 0);

    // Requantize: scale in float, add the magic bias, reinterpret the float
    // bits as an integer, then clamp and remove the bias in integer space.
    float f0 = (float) acc0;
    float f1 = (float) acc1;
    const float vscale = params->fp32_scalar_imagic.scale;
    f0 *= vscale;
    f1 *= vscale;
    const float vbias = params->fp32_scalar_imagic.magic_bias;
    f0 += vbias;
    f1 += vbias;
    int32_t out0 = (int32_t) float_as_uint32(f0);
    int32_t out1 = (int32_t) float_as_uint32(f1);
    const int32_t vmin = params->fp32_scalar_imagic.magic_min;
    out0 = math_max_s32(out0, vmin);
    out1 = math_max_s32(out1, vmin);
    const int32_t vmax = params->fp32_scalar_imagic.magic_max;
    out0 = math_min_s32(out0, vmax);
    out1 = math_min_s32(out1, vmax);
    const int32_t vadjust = params->fp32_scalar_imagic.magic_bias_less_zero_point;
    out0 -= vadjust;
    out1 -= vadjust;

    if XNN_LIKELY(nc >= 2) {
      // Full 2-column store; rewind the indirection pointer for the next tile.
      c0[0] = (int8_t) out0;
      c0[1] = (int8_t) out1;
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      a = (const int8_t**restrict) ((uintptr_t) a - ks);
      nc -= 2;
    } else {
      // Tail: at most one column left.
      if (nc & 1) {
        c0[0] = (int8_t) out0;
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 3,083
| 25.586207
| 102
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-1x2-minmax-fp32-scalar-lrintf.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
#include <xnnpack/unaligned.h>
// QS8 indirect GEMM microkernel: 1 output row x 2 output columns, plain C
// scalar arithmetic, fp32 requantization via lrintf (round-to-nearest-even
// per the default rounding mode).
void xnn_qs8_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (1 * sizeof(void*)) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  int8_t* c0 = c;

  do {
    // Seed both accumulators with the packed int32 biases.
    int32_t acc0 = unaligned_indexed_load_s32(w, 0);
    int32_t acc1 = unaligned_indexed_load_s32(w, 1);
    w = (const void*) ((const int32_t*) w + 2);

    size_t p = ks;
    do {
      // An indirection entry equal to `zero` points at the zero buffer and
      // must not be offset by a_offset.
      const int8_t* restrict a0 = a[0];
      assert(a0 != NULL);
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      a += 1;

      // Multiply-accumulate one input byte against both weight columns.
      size_t k = kc;
      do {
        const int32_t va = (int32_t) *a0++;
        const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
        const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
        w = (const void*) ((const int8_t*) w + 2);
        acc0 += va * vb0;
        acc1 += va * vb1;
        k -= sizeof(int8_t);
      } while (k != 0);
      p -= 1 * sizeof(void*);
    } while (p != 0);

    // Requantize: scale, clamp in float, round with lrintf, then shift into
    // the quantized domain by the output zero point.
    float f0 = (float) acc0;
    float f1 = (float) acc1;
    const float vscale = params->fp32_scalar_lrintf.scale;
    f0 *= vscale;
    f1 *= vscale;
    const float vmin = params->fp32_scalar_lrintf.output_min_less_zero_point;
    f0 = math_max_f32(f0, vmin);
    f1 = math_max_f32(f1, vmin);
    const float vmax = params->fp32_scalar_lrintf.output_max_less_zero_point;
    f0 = math_min_f32(f0, vmax);
    f1 = math_min_f32(f1, vmax);
    const int32_t vzero_point = params->fp32_scalar_lrintf.output_zero_point;
    const int32_t out0 = (int32_t) lrintf(f0) + vzero_point;
    const int32_t out1 = (int32_t) lrintf(f1) + vzero_point;

    if XNN_LIKELY(nc >= 2) {
      // Full 2-column store; rewind the indirection pointer for the next tile.
      c0[0] = (int8_t) out0;
      c0[1] = (int8_t) out1;
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      a = (const int8_t**restrict) ((uintptr_t) a - ks);
      nc -= 2;
    } else {
      // Tail: at most one column left.
      if (nc & 1) {
        c0[0] = (int8_t) out0;
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 3,123
| 26.646018
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-1x2-minmax-fp32-wasm-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
#include <xnnpack/unaligned.h>
// QS8 indirect GEMM microkernel: 1 output row x 2 output columns, scalar
// arithmetic with WebAssembly min/max builtins, fp32 "fmagic"
// requantization (clamp in float, then magic-bias rounding).
void xnn_qs8_igemm_minmax_fp32_ukernel_1x2__wasm_fmagic(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (1 * sizeof(void*)) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  int8_t* c0 = c;

  do {
    // Seed both accumulators with the packed int32 biases.
    int32_t acc0 = unaligned_indexed_load_s32(w, 0);
    int32_t acc1 = unaligned_indexed_load_s32(w, 1);
    w = (const void*) ((const int32_t*) w + 2);

    size_t p = ks;
    do {
      // An indirection entry equal to `zero` points at the zero buffer and
      // must not be offset by a_offset.
      const int8_t* restrict a0 = a[0];
      assert(a0 != NULL);
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      a += 1;

      // Multiply-accumulate one input byte against both weight columns.
      size_t k = kc;
      do {
        const int32_t va = (int32_t) *a0++;
        const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
        const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
        w = (const void*) ((const int8_t*) w + 2);
        acc0 += va * vb0;
        acc1 += va * vb1;
        k -= sizeof(int8_t);
      } while (k != 0);
      p -= 1 * sizeof(void*);
    } while (p != 0);

    // Requantize: scale, clamp with the wasm float min/max builtins, add the
    // magic bias and reinterpret the float bits to recover a rounded integer.
    float f0 = (float) acc0;
    float f1 = (float) acc1;
    const float vscale = params->fp32_scalar_fmagic.scale;
    f0 *= vscale;
    f1 *= vscale;
    const float vmin = params->fp32_scalar_fmagic.output_min_less_zero_point;
    f0 = __builtin_wasm_max_f32(f0, vmin);
    f1 = __builtin_wasm_max_f32(f1, vmin);
    const float vmax = params->fp32_scalar_fmagic.output_max_less_zero_point;
    f0 = __builtin_wasm_min_f32(f0, vmax);
    f1 = __builtin_wasm_min_f32(f1, vmax);
    const float vbias = params->fp32_scalar_fmagic.magic_bias;
    f0 += vbias;
    f1 += vbias;
    const int32_t vadjust = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
    int32_t out0 = (int32_t) float_as_uint32(f0) - vadjust;
    int32_t out1 = (int32_t) float_as_uint32(f1) - vadjust;

    if XNN_LIKELY(nc >= 2) {
      // Full 2-column store; rewind the indirection pointer for the next tile.
      c0[0] = (int8_t) out0;
      c0[1] = (int8_t) out1;
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      a = (const int8_t**restrict) ((uintptr_t) a - ks);
      nc -= 2;
    } else {
      // Tail: at most one column left.
      if (nc & 1) {
        c0[0] = (int8_t) out0;
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 3,268
| 27.929204
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-1x2-minmax-rndnu-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
#include <xnnpack/unaligned.h>
// QS8 indirect GEMM microkernel: 1 output row x 2 output columns, plain C
// scalar arithmetic, rndnu requantization (64-bit fixed-point multiply with
// rounding and arithmetic shift).
void xnn_qs8_igemm_minmax_rndnu_ukernel_1x2__scalar(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (1 * sizeof(void*)) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  int8_t* c0 = c;

  do {
    // Seed both accumulators with the packed int32 biases.
    int32_t acc0 = unaligned_indexed_load_s32(w, 0);
    int32_t acc1 = unaligned_indexed_load_s32(w, 1);
    w = (const void*) ((const int32_t*) w + 2);

    size_t p = ks;
    do {
      // An indirection entry equal to `zero` points at the zero buffer and
      // must not be offset by a_offset.
      const int8_t* restrict a0 = a[0];
      assert(a0 != NULL);
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      a += 1;

      // Multiply-accumulate one input byte against both weight columns.
      size_t k = kc;
      do {
        const int32_t va = (int32_t) *a0++;
        const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
        const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
        w = (const void*) ((const int8_t*) w + 2);
        acc0 += va * vb0;
        acc1 += va * vb1;
        k -= sizeof(int8_t);
      } while (k != 0);
      p -= 1 * sizeof(void*);
    } while (p != 0);

    // Requantize: widening 32x32->64 multiply, add the rounding constant,
    // then arithmetic shift right.
    const int32_t vmultiplier = params->rndnu_scalar.multiplier;
    const int64_t vrounding = params->rndnu_scalar.rounding;
    const uint32_t vshift = params->rndnu_scalar.shift;
    int32_t out0 = (int32_t) math_asr_s64(math_mulext_s32(acc0, vmultiplier) + vrounding, vshift);
    int32_t out1 = (int32_t) math_asr_s64(math_mulext_s32(acc1, vmultiplier) + vrounding, vshift);
    // Clamp relative to the zero point, then shift into the quantized domain.
    const int32_t vmin = params->rndnu_scalar.output_min_less_zero_point;
    out0 = math_max_s32(out0, vmin);
    out1 = math_max_s32(out1, vmin);
    const int32_t vmax = params->rndnu_scalar.output_max_less_zero_point;
    out0 = math_min_s32(out0, vmax);
    out1 = math_min_s32(out1, vmax);
    const int32_t vzero_point = params->rndnu_scalar.output_zero_point;
    out0 += vzero_point;
    out1 += vzero_point;

    if XNN_LIKELY(nc >= 2) {
      // Full 2-column store; rewind the indirection pointer for the next tile.
      c0[0] = (int8_t) out0;
      c0[1] = (int8_t) out1;
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      a = (const int8_t**restrict) ((uintptr_t) a - ks);
      nc -= 2;
    } else {
      // Tail: at most one column left.
      if (nc & 1) {
        c0[0] = (int8_t) out0;
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 3,199
| 27.828829
| 96
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-1x2c4-minmax-fp32-armsimd32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/c4-armsimd32.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_acle.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
#include <xnnpack/unaligned.h>
// QS8 indirect GEMM (IGEMM) micro-kernel: 1 row (MR=1) x 2 columns (NR=2),
// consuming 4 K elements per step (c4) via Armv6 SIMD-within-register (ACLE
// "armsimd32" intrinsics). Requantization is fp32 with the magic-bias trick.
void xnn_qs8_igemm_minmax_fp32_ukernel_1x2c4__armsimd32(
    size_t mr,                  // rows to compute; asserted == 1 below
    size_t nc,                  // output columns remaining
    size_t kc,                  // K elements per tap (rounded up to 4 below)
    size_t ks,                  // indirection-buffer span, bytes per output pixel
    const int8_t** restrict a,  // indirection buffer of input row pointers
    const void* restrict w,     // packed weights: int32 bias pair, then int8 data
    int8_t* restrict c,         // output pointer
    size_t cm_stride,           // output row stride (not referenced: MR == 1)
    size_t cn_stride,           // output column-group stride, bytes
    size_t a_offset,            // byte offset applied to non-`zero` input pointers
    const int8_t* zero,         // sentinel pointer: rows equal to it skip a_offset
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (1 * sizeof(void*)) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Packed weights are padded so K can be walked in whole groups of 4 bytes.
  kc = round_up_po2(kc, 4 * sizeof(int8_t));
  int8_t* c0 = c;

  const float vscale = params->fp32_armsimd32.scale;
  const float vmagic_bias = params->fp32_armsimd32.magic_bias;
  do {
    // Seed the two column accumulators from the packed per-column bias.
    int32_t vacc0x0 = ((const int32_t*) w)[0];
    int32_t vacc0x1 = ((const int32_t*) w)[1];
    w = (const void*) ((const int32_t*) w + 2);

    size_t p = ks;
    do {
      const int8_t* restrict a0 = a[0];
      assert(a0 != NULL);
      if XNN_UNPREDICTABLE(a0 != zero) {
        // Real input row: rebase by the caller-provided byte offset.
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      a += 1;

      size_t k = kc;
      do {
        // Load 4 input bytes and sign-extend into even (0,2) and odd (1,3)
        // 16-bit lane pairs for the dual-MAC below.
        const int8x4_t va0 = (int8x4_t) unaligned_load_s32(a0); a0 += 4;
        const int16x2_t va0c02 = __sxtb16(va0);
        const int16x2_t va0c13 = __sxtb16(__ror(va0, 8));

        // Column 0: two __smlad dual 16x16 multiply-accumulates cover all 4 K.
        const int8x4_t vb0 = *((const int8x4_t*) w); w = (const int8_t*) w + 4;
        const int16x2_t vb0c02 = __sxtb16(vb0);
        vacc0x0 = __smlad(va0c02, vb0c02, vacc0x0);
        const int16x2_t vb0c13 = __sxtb16(__ror(vb0, 8));
        vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);

        // Column 1: same pattern with the next 4 packed weight bytes.
        const int8x4_t vb1 = *((const int8x4_t*) w); w = (const int8_t*) w + 4;
        const int16x2_t vb1c02 = __sxtb16(vb1);
        vacc0x1 = __smlad(va0c02, vb1c02, vacc0x1);
        const int16x2_t vb1c13 = __sxtb16(__ror(vb1, 8));
        vacc0x1 = __smlad(va0c13, vb1c13, vacc0x1);

        k -= 4 * sizeof(int8_t);
      } while (k != 0);
      p -= 1 * sizeof(void*);
    } while (p != 0);

    // Requantize: scale in fp32, then add the magic bias so the rounded
    // integer appears in the low mantissa bits of the float's bit pattern.
    float vfpacc0x0 = (float) vacc0x0;
    float vfpacc0x1 = (float) vacc0x1;
    vfpacc0x0 *= vscale;
    vfpacc0x1 *= vscale;
    vfpacc0x0 += vmagic_bias;
    vfpacc0x1 += vmagic_bias;
    int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0);
    int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1);

    // Saturating subtract removes the magic-bias bits and adds the output
    // zero point in one step; __ssat then clamps to the 8-bit signed range.
    const int32_t vmagic_bias_less_zero_point = params->fp32_armsimd32.magic_bias_less_zero_point;
    vout0x0 = __qsub(vout0x0, vmagic_bias_less_zero_point);
    vout0x1 = __qsub(vout0x1, vmagic_bias_less_zero_point);
    vout0x0 = __ssat(vout0x0, 8);
    vout0x1 = __ssat(vout0x1, 8);

    // Pack both result bytes into one word for per-byte clamping.
    const uint32_t vout0 = (uint32_t) (uint8_t) vout0x0 | ((uint32_t) vout0x1 << 8);
    uint32_t vout = vout0;

    // Per-byte clamp: __ssub8 sets the APSR.GE flags which the following
    // __sel uses to pick, per byte lane, the larger (min clamp) then the
    // smaller (max clamp) of the two operands.
    const int8x4_t voutput_min = (int8x4_t) params->fp32_armsimd32.output_min;
    __ssub8((int8x4_t) vout, voutput_min);
    vout = (uint32_t) __sel((uint8x4_t) vout, (uint8x4_t) voutput_min);
    const int8x4_t voutput_max = (int8x4_t) params->fp32_armsimd32.output_max;
    __ssub8((int8x4_t) vout, voutput_max);
    vout = (uint32_t) __sel((uint8x4_t) voutput_max, (uint8x4_t) vout);

    if XNN_LIKELY(nc >= 2) {
      // Full column group: store both bytes with one 16-bit store.
      unaligned_store_u16(c0, (uint16_t) vout);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      // Rewind the indirection buffer for the next column group.
      a = (const int8_t**restrict) ((uintptr_t) a - ks);
      nc -= 2;
    } else {
      // Remainder: exactly one column left; store its byte.
      *c0 = (int8_t) vout;
      nc = 0;
    }
  } while (nc != 0);
}
| 3,867
| 27.651852
| 98
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-1x4-minmax-fp32-scalar-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
// QS8 indirect GEMM (IGEMM) micro-kernel: 1 row x 4 columns, plain scalar
// code. Requantization is fp32 with clamping in float space followed by the
// magic-bias round-to-integer trick (fmagic variant).
void xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (1 * sizeof(void*)) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  int8_t* out0 = c;
  do {
    // Seed the 4 column accumulators from the packed per-column bias.
    int32_t acc[4];
    for (size_t j = 0; j < 4; j++) {
      acc[j] = ((const int32_t*) w)[j];
    }
    w = (const void*) ((const int32_t*) w + 4);

    // Walk the indirection buffer: one input row pointer per kernel tap.
    for (size_t p = ks; p != 0; p -= 1 * sizeof(void*)) {
      const int8_t* restrict in0 = a[0];
      assert(in0 != NULL);
      if XNN_UNPREDICTABLE(in0 != zero) {
        // Real input row: rebase by the caller-provided byte offset.
        in0 = (const int8_t*) ((uintptr_t) in0 + a_offset);
      }
      a += 1;

      // Multiply-accumulate one input element into all 4 columns per step.
      for (size_t k = kc; k != 0; k -= sizeof(int8_t)) {
        const int32_t x = (int32_t) *in0++;
        for (size_t j = 0; j < 4; j++) {
          acc[j] += x * (int32_t) ((const int8_t*) w)[j];
        }
        w = (const void*) ((const int8_t*) w + 4);
      }
    }

    // Requantize: scale in fp32, clamp in fp32, then add the magic bias so
    // the rounded integer sits in the float's low mantissa bits, and finally
    // subtract (magic-bias bits - output zero point).
    const float scale = params->fp32_scalar_fmagic.scale;
    const float qmin_f = params->fp32_scalar_fmagic.output_min_less_zero_point;
    const float qmax_f = params->fp32_scalar_fmagic.output_max_less_zero_point;
    const float magic_bias = params->fp32_scalar_fmagic.magic_bias;
    const int32_t magic_bias_less_zp = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
    int32_t out[4];
    for (size_t j = 0; j < 4; j++) {
      float f = (float) acc[j];
      f *= scale;
      f = math_max_f32(f, qmin_f);
      f = math_min_f32(f, qmax_f);
      f += magic_bias;
      out[j] = (int32_t) float_as_uint32(f) - magic_bias_less_zp;
    }

    if XNN_LIKELY(nc >= 4) {
      // Full column group.
      for (size_t j = 0; j < 4; j++) {
        out0[j] = (int8_t) out[j];
      }
      out0 = (int8_t*) ((uintptr_t) out0 + cn_stride);
      // Rewind the indirection buffer for the next column group.
      a = (const int8_t**restrict) ((uintptr_t) a - ks);
      nc -= 4;
    } else {
      // Remainder: 1-3 columns left.
      if (nc & 2) {
        out0[0] = (int8_t) out[0];
        out0[1] = (int8_t) out[1];
        out[0] = out[2];
        out0 += 2;
      }
      if (nc & 1) {
        out0[0] = (int8_t) out[0];
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 4,325
| 30.347826
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-1x4-minmax-fp32-scalar-imagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
// QS8 indirect GEMM (IGEMM) micro-kernel: 1 row x 4 columns, plain scalar
// code. Requantization is fp32 with the integer-magic (imagic) variant:
// the float is biased first, and clamping happens on the biased integer
// bit pattern instead of on the float values.
void xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_imagic(
    size_t mr,                  // rows to compute; asserted == 1 below
    size_t nc,                  // output columns remaining
    size_t kc,                  // K elements per tap
    size_t ks,                  // indirection-buffer span, bytes per output pixel
    const int8_t** restrict a,  // indirection buffer of input row pointers
    const void* restrict w,     // packed weights: 4 int32 biases, then int8 data
    int8_t* restrict c,         // output pointer
    size_t cm_stride,           // output row stride (not referenced: MR == 1)
    size_t cn_stride,           // output column-group stride, bytes
    size_t a_offset,            // byte offset applied to non-`zero` input pointers
    const int8_t* zero,         // sentinel pointer: rows equal to it skip a_offset
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (1 * sizeof(void*)) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  int8_t* c0 = c;

  do {
    // Seed the 4 column accumulators from the packed per-column bias.
    int32_t vacc0x0 = ((const int32_t*) w)[0];
    int32_t vacc0x1 = ((const int32_t*) w)[1];
    int32_t vacc0x2 = ((const int32_t*) w)[2];
    int32_t vacc0x3 = ((const int32_t*) w)[3];
    w = (const void*) ((const int32_t*) w + 4);

    size_t p = ks;
    do {
      const int8_t* restrict a0 = a[0];
      assert(a0 != NULL);
      if XNN_UNPREDICTABLE(a0 != zero) {
        // Real input row: rebase by the caller-provided byte offset.
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      a += 1;

      // Multiply-accumulate one input element into all 4 columns per step.
      size_t k = kc;
      do {
        const int32_t va0 = (int32_t) *a0++;
        const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
        const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
        const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
        const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
        w = (const void*) ((const int8_t*) w + 4);

        vacc0x0 += va0 * vb0;
        vacc0x1 += va0 * vb1;
        vacc0x2 += va0 * vb2;
        vacc0x3 += va0 * vb3;

        k -= sizeof(int8_t);
      } while (k != 0);
      p -= 1 * sizeof(void*);
    } while (p != 0);

    // Requantize: scale in fp32 and add the magic bias so the rounded
    // integer appears in the low mantissa bits of the float bit pattern.
    float vfpacc0x0 = (float) vacc0x0;
    float vfpacc0x1 = (float) vacc0x1;
    float vfpacc0x2 = (float) vacc0x2;
    float vfpacc0x3 = (float) vacc0x3;

    const float vscale = params->fp32_scalar_imagic.scale;
    vfpacc0x0 *= vscale;
    vfpacc0x1 *= vscale;
    vfpacc0x2 *= vscale;
    vfpacc0x3 *= vscale;

    const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
    vfpacc0x0 += vmagic_bias;
    vfpacc0x1 += vmagic_bias;
    vfpacc0x2 += vmagic_bias;
    vfpacc0x3 += vmagic_bias;

    // Reinterpret the biased floats as integers; clamp in the biased integer
    // domain (magic_min/magic_max are the biased images of the output range).
    int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0);
    int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1);
    int32_t vout0x2 = (int32_t) float_as_uint32(vfpacc0x2);
    int32_t vout0x3 = (int32_t) float_as_uint32(vfpacc0x3);

    const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
    vout0x0 = math_max_s32(vout0x0, vmagic_min);
    vout0x1 = math_max_s32(vout0x1, vmagic_min);
    vout0x2 = math_max_s32(vout0x2, vmagic_min);
    vout0x3 = math_max_s32(vout0x3, vmagic_min);

    const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
    vout0x0 = math_min_s32(vout0x0, vmagic_max);
    vout0x1 = math_min_s32(vout0x1, vmagic_max);
    vout0x2 = math_min_s32(vout0x2, vmagic_max);
    vout0x3 = math_min_s32(vout0x3, vmagic_max);

    // Remove the magic-bias bits and add the output zero point in one step.
    const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
    vout0x0 -= vmagic_bias_less_zero_point;
    vout0x1 -= vmagic_bias_less_zero_point;
    vout0x2 -= vmagic_bias_less_zero_point;
    vout0x3 -= vmagic_bias_less_zero_point;

    if XNN_LIKELY(nc >= 4) {
      // Full column group.
      c0[0] = (int8_t) vout0x0;
      c0[1] = (int8_t) vout0x1;
      c0[2] = (int8_t) vout0x2;
      c0[3] = (int8_t) vout0x3;

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      // Rewind the indirection buffer for the next column group.
      a = (const int8_t**restrict) ((uintptr_t) a - ks);
      nc -= 4;
    } else {
      // Remainder: 1-3 columns left.
      if (nc & 2) {
        c0[0] = (int8_t) vout0x0;
        c0[1] = (int8_t) vout0x1;
        vout0x0 = vout0x2;
        c0 += 2;
      }
      if (nc & 1) {
        c0[0] = (int8_t) vout0x0;
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 4,108
| 27.734266
| 102
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-1x4-minmax-fp32-scalar-lrintf.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
// QS8 indirect GEMM (IGEMM) micro-kernel: 1 row x 4 columns, plain scalar
// code. Requantization is fp32: scale, clamp in float space, then convert
// with lrintf() (rounds per the current FP rounding mode, to-nearest-even
// by default).
void xnn_qs8_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf(
    size_t mr,                  // rows to compute; asserted == 1 below
    size_t nc,                  // output columns remaining
    size_t kc,                  // K elements per tap
    size_t ks,                  // indirection-buffer span, bytes per output pixel
    const int8_t** restrict a,  // indirection buffer of input row pointers
    const void* restrict w,     // packed weights: 4 int32 biases, then int8 data
    int8_t* restrict c,         // output pointer
    size_t cm_stride,           // output row stride (not referenced: MR == 1)
    size_t cn_stride,           // output column-group stride, bytes
    size_t a_offset,            // byte offset applied to non-`zero` input pointers
    const int8_t* zero,         // sentinel pointer: rows equal to it skip a_offset
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (1 * sizeof(void*)) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  int8_t* c0 = c;

  do {
    // Seed the 4 column accumulators from the packed per-column bias.
    int32_t vacc0x0 = ((const int32_t*) w)[0];
    int32_t vacc0x1 = ((const int32_t*) w)[1];
    int32_t vacc0x2 = ((const int32_t*) w)[2];
    int32_t vacc0x3 = ((const int32_t*) w)[3];
    w = (const void*) ((const int32_t*) w + 4);

    size_t p = ks;
    do {
      const int8_t* restrict a0 = a[0];
      assert(a0 != NULL);
      if XNN_UNPREDICTABLE(a0 != zero) {
        // Real input row: rebase by the caller-provided byte offset.
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      a += 1;

      // Multiply-accumulate one input element into all 4 columns per step.
      size_t k = kc;
      do {
        const int32_t va0 = (int32_t) *a0++;
        const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
        const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
        const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
        const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
        w = (const void*) ((const int8_t*) w + 4);

        vacc0x0 += va0 * vb0;
        vacc0x1 += va0 * vb1;
        vacc0x2 += va0 * vb2;
        vacc0x3 += va0 * vb3;

        k -= sizeof(int8_t);
      } while (k != 0);
      p -= 1 * sizeof(void*);
    } while (p != 0);

    // Requantize: scale the int32 accumulators in fp32.
    float vfpacc0x0 = (float) vacc0x0;
    float vfpacc0x1 = (float) vacc0x1;
    float vfpacc0x2 = (float) vacc0x2;
    float vfpacc0x3 = (float) vacc0x3;

    const float vscale = params->fp32_scalar_lrintf.scale;
    vfpacc0x0 *= vscale;
    vfpacc0x1 *= vscale;
    vfpacc0x2 *= vscale;
    vfpacc0x3 *= vscale;

    // Clamp in float space against the zero-point-adjusted output range.
    const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
    vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
    vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
    vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
    vfpacc0x3 = math_max_f32(vfpacc0x3, voutput_min_less_zero_point);

    const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
    vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
    vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
    vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
    vfpacc0x3 = math_min_f32(vfpacc0x3, voutput_max_less_zero_point);

    // Round to integer, then add the output zero point.
    const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
    const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
    const int32_t vrndacc0x2 = (int32_t) lrintf(vfpacc0x2);
    const int32_t vrndacc0x3 = (int32_t) lrintf(vfpacc0x3);

    const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
    int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
    int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
    int32_t vout0x2 = vrndacc0x2 + voutput_zero_point;
    int32_t vout0x3 = vrndacc0x3 + voutput_zero_point;

    if XNN_LIKELY(nc >= 4) {
      // Full column group.
      c0[0] = (int8_t) vout0x0;
      c0[1] = (int8_t) vout0x1;
      c0[2] = (int8_t) vout0x2;
      c0[3] = (int8_t) vout0x3;

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      // Rewind the indirection buffer for the next column group.
      a = (const int8_t**restrict) ((uintptr_t) a - ks);
      nc -= 4;
    } else {
      // Remainder: 1-3 columns left.
      if (nc & 2) {
        c0[0] = (int8_t) vout0x0;
        c0[1] = (int8_t) vout0x1;
        vout0x0 = vout0x2;
        c0 += 2;
      }
      if (nc & 1) {
        c0[0] = (int8_t) vout0x0;
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 4,194
| 29.398551
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-1x4-minmax-fp32-wasm-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
// QS8 indirect GEMM (IGEMM) micro-kernel: 1 row x 4 columns, WebAssembly
// scalar variant. Identical structure to the scalar fmagic kernel (it even
// shares the fp32_scalar_fmagic params), but clamps with the Clang
// __builtin_wasm_min/max_f32 builtins that map to wasm f32.min/f32.max.
void xnn_qs8_igemm_minmax_fp32_ukernel_1x4__wasm_fmagic(
    size_t mr,                  // rows to compute; asserted == 1 below
    size_t nc,                  // output columns remaining
    size_t kc,                  // K elements per tap
    size_t ks,                  // indirection-buffer span, bytes per output pixel
    const int8_t** restrict a,  // indirection buffer of input row pointers
    const void* restrict w,     // packed weights: 4 int32 biases, then int8 data
    int8_t* restrict c,         // output pointer
    size_t cm_stride,           // output row stride (not referenced: MR == 1)
    size_t cn_stride,           // output column-group stride, bytes
    size_t a_offset,            // byte offset applied to non-`zero` input pointers
    const int8_t* zero,         // sentinel pointer: rows equal to it skip a_offset
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (1 * sizeof(void*)) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  int8_t* c0 = c;

  do {
    // Seed the 4 column accumulators from the packed per-column bias.
    int32_t vacc0x0 = ((const int32_t*) w)[0];
    int32_t vacc0x1 = ((const int32_t*) w)[1];
    int32_t vacc0x2 = ((const int32_t*) w)[2];
    int32_t vacc0x3 = ((const int32_t*) w)[3];
    w = (const void*) ((const int32_t*) w + 4);

    size_t p = ks;
    do {
      const int8_t* restrict a0 = a[0];
      assert(a0 != NULL);
      if XNN_UNPREDICTABLE(a0 != zero) {
        // Real input row: rebase by the caller-provided byte offset.
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      a += 1;

      // Multiply-accumulate one input element into all 4 columns per step.
      size_t k = kc;
      do {
        const int32_t va0 = (int32_t) *a0++;
        const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
        const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
        const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
        const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
        w = (const void*) ((const int8_t*) w + 4);

        vacc0x0 += va0 * vb0;
        vacc0x1 += va0 * vb1;
        vacc0x2 += va0 * vb2;
        vacc0x3 += va0 * vb3;

        k -= sizeof(int8_t);
      } while (k != 0);
      p -= 1 * sizeof(void*);
    } while (p != 0);

    // Requantize: scale the int32 accumulators in fp32.
    float vfpacc0x0 = (float) vacc0x0;
    float vfpacc0x1 = (float) vacc0x1;
    float vfpacc0x2 = (float) vacc0x2;
    float vfpacc0x3 = (float) vacc0x3;

    const float vscale = params->fp32_scalar_fmagic.scale;
    vfpacc0x0 *= vscale;
    vfpacc0x1 *= vscale;
    vfpacc0x2 *= vscale;
    vfpacc0x3 *= vscale;

    // Clamp in float space using the wasm f32.max/f32.min builtins.
    const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
    vfpacc0x0 = __builtin_wasm_max_f32(vfpacc0x0, voutput_min_less_zero_point);
    vfpacc0x1 = __builtin_wasm_max_f32(vfpacc0x1, voutput_min_less_zero_point);
    vfpacc0x2 = __builtin_wasm_max_f32(vfpacc0x2, voutput_min_less_zero_point);
    vfpacc0x3 = __builtin_wasm_max_f32(vfpacc0x3, voutput_min_less_zero_point);

    const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
    vfpacc0x0 = __builtin_wasm_min_f32(vfpacc0x0, voutput_max_less_zero_point);
    vfpacc0x1 = __builtin_wasm_min_f32(vfpacc0x1, voutput_max_less_zero_point);
    vfpacc0x2 = __builtin_wasm_min_f32(vfpacc0x2, voutput_max_less_zero_point);
    vfpacc0x3 = __builtin_wasm_min_f32(vfpacc0x3, voutput_max_less_zero_point);

    // Magic-bias trick: add the bias so the rounded integer appears in the
    // low mantissa bits, then subtract (bias bits - output zero point).
    const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
    vfpacc0x0 += vmagic_bias;
    vfpacc0x1 += vmagic_bias;
    vfpacc0x2 += vmagic_bias;
    vfpacc0x3 += vmagic_bias;

    const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
    int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0) - vmagic_bias_less_output_zero_point;
    int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1) - vmagic_bias_less_output_zero_point;
    int32_t vout0x2 = (int32_t) float_as_uint32(vfpacc0x2) - vmagic_bias_less_output_zero_point;
    int32_t vout0x3 = (int32_t) float_as_uint32(vfpacc0x3) - vmagic_bias_less_output_zero_point;

    if XNN_LIKELY(nc >= 4) {
      // Full column group.
      c0[0] = (int8_t) vout0x0;
      c0[1] = (int8_t) vout0x1;
      c0[2] = (int8_t) vout0x2;
      c0[3] = (int8_t) vout0x3;

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      // Rewind the indirection buffer for the next column group.
      a = (const int8_t**restrict) ((uintptr_t) a - ks);
      nc -= 4;
    } else {
      // Remainder: 1-3 columns left.
      if (nc & 2) {
        c0[0] = (int8_t) vout0x0;
        c0[1] = (int8_t) vout0x1;
        vout0x0 = vout0x2;
        c0 += 2;
      }
      if (nc & 1) {
        c0[0] = (int8_t) vout0x0;
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 4,403
| 30.913043
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-1x4-minmax-rndnu-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
// QS8 indirect GEMM (IGEMM) micro-kernel: 1 row x 4 columns, plain scalar
// code with rndnu fixed-point requantization: widening multiply, add the
// rounding constant, arithmetic shift right, clamp, then add the zero point.
void xnn_qs8_igemm_minmax_rndnu_ukernel_1x4__scalar(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (1 * sizeof(void*)) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  int8_t* out0 = c;
  do {
    // Seed the 4 column accumulators from the packed per-column bias.
    int32_t acc[4];
    for (size_t j = 0; j < 4; j++) {
      acc[j] = ((const int32_t*) w)[j];
    }
    w = (const void*) ((const int32_t*) w + 4);

    // Walk the indirection buffer: one input row pointer per kernel tap.
    for (size_t p = ks; p != 0; p -= 1 * sizeof(void*)) {
      const int8_t* restrict in0 = a[0];
      assert(in0 != NULL);
      if XNN_UNPREDICTABLE(in0 != zero) {
        // Real input row: rebase by the caller-provided byte offset.
        in0 = (const int8_t*) ((uintptr_t) in0 + a_offset);
      }
      a += 1;

      // Multiply-accumulate one input element into all 4 columns per step.
      for (size_t k = kc; k != 0; k -= sizeof(int8_t)) {
        const int32_t x = (int32_t) *in0++;
        for (size_t j = 0; j < 4; j++) {
          acc[j] += x * (int32_t) ((const int8_t*) w)[j];
        }
        w = (const void*) ((const int8_t*) w + 4);
      }
    }

    // Requantize each lane: 32x32->64 multiply by the fixed-point multiplier,
    // round-to-nearest-up via the pre-added rounding term, arithmetic shift,
    // clamp to the zero-point-adjusted output range, then add the zero point.
    const int32_t multiplier = params->rndnu_scalar.multiplier;
    const int64_t rounding = params->rndnu_scalar.rounding;
    const uint32_t shift = params->rndnu_scalar.shift;
    const int32_t qmin = params->rndnu_scalar.output_min_less_zero_point;
    const int32_t qmax = params->rndnu_scalar.output_max_less_zero_point;
    const int32_t zero_point = params->rndnu_scalar.output_zero_point;
    int32_t out[4];
    for (size_t j = 0; j < 4; j++) {
      const int64_t ext = math_mulext_s32(acc[j], multiplier) + rounding;
      int32_t v = (int32_t) math_asr_s64(ext, shift);
      v = math_max_s32(v, qmin);
      v = math_min_s32(v, qmax);
      out[j] = v + zero_point;
    }

    if XNN_LIKELY(nc >= 4) {
      // Full column group.
      for (size_t j = 0; j < 4; j++) {
        out0[j] = (int8_t) out[j];
      }
      out0 = (int8_t*) ((uintptr_t) out0 + cn_stride);
      // Rewind the indirection buffer for the next column group.
      a = (const int8_t**restrict) ((uintptr_t) a - ks);
      nc -= 4;
    } else {
      // Remainder: 1-3 columns left.
      if (nc & 2) {
        out0[0] = (int8_t) out[0];
        out0[1] = (int8_t) out[1];
        out[0] = out[2];
        out0 += 2;
      }
      if (nc & 1) {
        out0[0] = (int8_t) out[0];
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 4,262
| 30.813433
| 96
|
c
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.