Dataset columns (type, observed range):
repo: string, length 1 to 152
file: string, length 14 to 221
code: string, length 501 to 25k
file_length: int64, 501 to 25k
avg_line_length: float64, 20 to 99.5
max_line_length: int64, 21 to 134
extension_type: string, 2 classes
repo: XNNPACK
file: XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-2x8c4-minmax-fp32-neonv8-mlal-ld2r.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/c4-neon-mull-dup.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/gemm.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/math.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_2x8c4__neonv8_mlal_ld2r( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 2); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 4 * sizeof(int8_t)); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr != 2) { a1 = a0; c1 = c0; } do { int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t)); int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t)); int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t)); int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t)); int32x4_t vacc1x01 = vacc0x01; int32x4_t vacc1x23 = vacc0x23; int32x4_t vacc1x45 = vacc0x45; int32x4_t vacc1x67 = vacc0x67; size_t k = kc; while (k >= 16 * sizeof(int8_t)) { const int32x2x2_t va0x0 = vld2_dup_s32((const void*)a0); a0 += 8; const int32x2x2_t va0x1 = vld2_dup_s32((const void*)a0); a0 += 8; const int32x2x2_t va1x0 = vld2_dup_s32((const void*)a1); a1 += 8; const int32x2x2_t va1x1 = vld2_dup_s32((const void*)a1); a1 += 8; const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t va0c0x0 = vreinterpret_s8_s32(va0x0.val[0]); const int8x8_t va0c0x1 = vreinterpret_s8_s32(va0x1.val[0]); const int8x8_t va1c0x0 = vreinterpret_s8_s32(va1x0.val[0]); const int8x8_t va1c0x1 = vreinterpret_s8_s32(va1x1.val[0]); int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0c0x0); int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1c0x0); const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0c0x1); vprod1x01c0 = vmlal_s8(vprod1x01c0, vb01c0x1, va1c0x1); vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0); vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0); int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0c0x0); int16x8_t 
vprod1x23c0 = vmull_s8(vb23c0x0, va1c0x0); const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0c0x1); vprod1x23c0 = vmlal_s8(vprod1x23c0, vb23c0x1, va1c0x1); vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0); vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0); int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0c0x0); int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1c0x0); const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0c0x1); vprod1x45c0 = vmlal_s8(vprod1x45c0, vb45c0x1, va1c0x1); vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0); vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0); int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0c0x0); int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1c0x0); const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0c0x1); vprod1x67c0 = vmlal_s8(vprod1x67c0, vb67c0x1, va1c0x1); vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0); vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0); const int8x8_t va0c1x0 = vreinterpret_s8_s32(va0x0.val[1]); const int8x8_t va0c1x1 = vreinterpret_s8_s32(va0x1.val[1]); const int8x8_t va1c1x0 = vreinterpret_s8_s32(va1x0.val[1]); const int8x8_t va1c1x1 = vreinterpret_s8_s32(va1x1.val[1]); int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0c1x0); int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1c1x0); const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0c1x1); vprod1x01c1 = vmlal_s8(vprod1x01c1, vb01c1x1, va1c1x1); vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1); vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1); int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0c1x0); int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1c1x0); const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0c1x1); vprod1x23c1 = vmlal_s8(vprod1x23c1, vb23c1x1, va1c1x1); vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1); vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1); int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0c1x0); int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1c1x0); const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0c1x1); vprod1x45c1 = vmlal_s8(vprod1x45c1, vb45c1x1, va1c1x1); vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1); vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1); int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0c1x0); int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1c1x0); const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0c1x1); vprod1x67c1 = vmlal_s8(vprod1x67c1, vb67c1x1, va1c1x1); vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1); vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1); k -= 16 * sizeof(int8_t); } if (k >= 8 * sizeof(int8_t)) { const int32x2x2_t va0 = vld2_dup_s32((const void*)a0); a0 += 8; const int32x2x2_t va1 = vld2_dup_s32((const void*)a1); a1 += 8; const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 
* sizeof(int8_t)); const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t va0c0 = vreinterpret_s8_s32(va0.val[0]); const int8x8_t va1c0 = vreinterpret_s8_s32(va1.val[0]); const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0); const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0); vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0); vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0); const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0); const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0); vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0); vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0); const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0); const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0); vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0); vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0); const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0); const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0); vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0); vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0); const int8x8_t va0c1 = vreinterpret_s8_s32(va0.val[1]); const int8x8_t va1c1 = vreinterpret_s8_s32(va1.val[1]); const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1); const int16x8_t vprod1x01c1 = vmull_s8(vb01c1, va1c1); vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1); vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1); const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1); const int16x8_t vprod1x23c1 = vmull_s8(vb23c1, va1c1); vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1); vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1); const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1); const int16x8_t vprod1x45c1 = vmull_s8(vb45c1, va1c1); vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1); vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1); const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1); const int16x8_t vprod1x67c1 = vmull_s8(vb67c1, va1c1); vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1); vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1); k -= 8 * sizeof(int8_t); } if XNN_UNLIKELY(k != 0) { const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k); const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k); const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0)); const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0); vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0); const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0); vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0); const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0); vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0); const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0); vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0); const int8x8_t va1c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 0)); const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0); vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0); const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, 
va1c0); vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0); const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0); vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0); const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0); vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0); } #if XNN_ARCH_ARM64 int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23); int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67); int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23); int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67); #else const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01)); const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23)); int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23); const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45)); const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67)); int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67); const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01)); const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23)); int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23); const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45)); const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67)); int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67); #endif float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123); float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567); float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123); float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567); const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4); vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123); vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale0123); const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4); vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567); vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale4567); vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123); vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567); vacc1x0123 = vcvtnq_s32_f32(vfpacc1x0123); vacc1x4567 = vcvtnq_s32_f32(vfpacc1x4567); const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point); #if XNN_ARCH_ARM64 int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567); int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567); vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point); vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point); int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567); #else int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)); int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)); vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point); vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point); int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567)); #endif const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neonv8.output_min); vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min); const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neonv8.output_max); vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max); if (nc >= 8) { vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567)); 
vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567)); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); nc -= 8; } else { // Final case where not all of the 8 columns fit in the destination. if (nc & 4) { vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4; vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4; vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4); } if (nc & 2) { vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2; vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2; vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2); } if (nc & 1) { vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0); vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8); } nc = 0; } } while (nc != 0); }
file_length: 16,666 | avg_line_length: 50.283077 | max_line_length: 128 | extension_type: c
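All the qs8-qc8w GEMM microkernels in this dataset compute the same contraction and differ only in vectorization strategy. As a reading aid, here is a minimal scalar reference for what the 2x8 kernels produce; every name in it is illustrative rather than XNNPACK's API, and it assumes the fp32 requantization path with round-to-nearest-even (the vcvtnq_s32_f32 variant used in the file above).

#include <math.h>
#include <stddef.h>
#include <stdint.h>

/* Scalar sketch: int32 dot products of int8 inputs and weights plus a
 * per-column int32 bias, then a per-channel float scale, round to nearest
 * even, output zero point, and clamp to [output_min, output_max]. */
static void qs8_qc8w_gemm_ref(
    size_t m, size_t n, size_t k,
    const int8_t* a, size_t a_stride,   /* m x k activations */
    const int8_t* b,                    /* n x k weights, one row per column */
    const int32_t* bias,                /* n biases */
    const float* scale,                 /* n per-channel scales */
    int8_t* c, size_t c_stride,         /* m x n outputs */
    int16_t output_zero_point, int8_t output_min, int8_t output_max)
{
  for (size_t i = 0; i < m; i++) {
    for (size_t j = 0; j < n; j++) {
      int32_t acc = bias[j];
      for (size_t p = 0; p < k; p++) {
        acc += (int32_t) a[i * a_stride + p] * (int32_t) b[j * k + p];
      }
      /* lrintf rounds to nearest even under the default FP environment,
       * matching vcvtnq_s32_f32. */
      long r = lrintf((float) acc * scale[j]) + output_zero_point;
      if (r < output_min) r = output_min;
      if (r > output_max) r = output_max;
      c[i * c_stride + j] = (int8_t) r;
    }
  }
}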
repo: XNNPACK
file: XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-2x8c4s2-minmax-fp32-neon-mlal.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/c4-neon-mull-shuffle.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 2); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr != 2) { a1 = a0; c1 = c0; } kc = round_up_po2(kc, 8 * sizeof(int8_t)); do { int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2; int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2; int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2; int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2; int32x4_t vacc1x01 = vacc0x01; int32x4_t vacc1x23 = vacc0x23; int32x4_t vacc1x45 = vacc0x45; int32x4_t vacc1x67 = vacc0x67; size_t k = kc; while (k >= 16 * sizeof(int8_t)) { int8x8_t va0x0 = vld1_s8(a0); a0 += 8; int8x8_t va0x1 = vld1_s8(a0); a0 += 8; int8x8_t va1x0 = vld1_s8(a1); a1 += 8; int8x8_t va1x1 = vld1_s8(a1); a1 += 8; const int8x8_t vb01c0x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb23c0x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb45c0x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb67c0x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb01c1x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb23c1x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb45c1x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb67c1x0 = vld1_s8(w); w = (const int8_t*) w + 8; int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0); int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0); const int8x8_t vb01c0x1 = vld1_s8(w); w = (const int8_t*) w + 8; vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0x1); vprod1x01c0 = vmlal_s8(vprod1x01c0, vb01c0x1, va1x1); vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0); vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0); int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0); int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0); const int8x8_t vb23c0x1 = vld1_s8(w); w = (const int8_t*) w + 8; vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0x1); vprod1x23c0 = vmlal_s8(vprod1x23c0, vb23c0x1, va1x1); vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0); vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0); int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0); int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0); const int8x8_t vb45c0x1 = vld1_s8(w); w = (const int8_t*) w + 8; vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0x1); vprod1x45c0 = vmlal_s8(vprod1x45c0, vb45c0x1, va1x1); vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0); vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0); int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0); int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0); 
const int8x8_t vb67c0x1 = vld1_s8(w); w = (const int8_t*) w + 8; vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0x1); vprod1x67c0 = vmlal_s8(vprod1x67c0, vb67c0x1, va1x1); vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0); vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0); va0x0 = vext_s8(va0x0, va0x0, 4); va0x1 = vext_s8(va0x1, va0x1, 4); va1x0 = vext_s8(va1x0, va1x0, 4); va1x1 = vext_s8(va1x1, va1x1, 4); int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0); int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0); const int8x8_t vb01c1x1 = vld1_s8(w); w = (const int8_t*) w + 8; vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0x1); vprod1x01c1 = vmlal_s8(vprod1x01c1, vb01c1x1, va1x1); vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1); vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1); int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0); int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0); const int8x8_t vb23c1x1 = vld1_s8(w); w = (const int8_t*) w + 8; vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0x1); vprod1x23c1 = vmlal_s8(vprod1x23c1, vb23c1x1, va1x1); vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1); vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1); int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0); int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0); const int8x8_t vb45c1x1 = vld1_s8(w); w = (const int8_t*) w + 8; vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0x1); vprod1x45c1 = vmlal_s8(vprod1x45c1, vb45c1x1, va1x1); vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1); vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1); int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0); int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0); const int8x8_t vb67c1x1 = vld1_s8(w); w = (const int8_t*) w + 8; vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0x1); vprod1x67c1 = vmlal_s8(vprod1x67c1, vb67c1x1, va1x1); vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1); vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1); k -= 16 * sizeof(int8_t); } if (k != 0) { int8x8_t va0x0 = vld1_s8(a0); a0 += 8; int8x8_t va1x0 = vld1_s8(a1); a1 += 8; const int8x8_t vb01c0x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb23c0x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb45c0x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb67c0x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb01c1x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb23c1x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb45c1x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb67c1x0 = vld1_s8(w); w = (const int8_t*) w + 8; int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0); int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0); vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0); vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0); int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0); int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0); vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0); vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0); int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0); int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0); vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0); vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0); int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0); int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0); vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0); vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0); va0x0 = vext_s8(va0x0, va0x0, 4); va1x0 = vext_s8(va1x0, va1x0, 4); int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0); int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0); vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1); vacc1x01 = vpadalq_s16(vacc1x01, 
vprod1x01c1); int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0); int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0); vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1); vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1); int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0); int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0); vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1); vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1); int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0); int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0); vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1); vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1); } #if XNN_ARCH_ARM64 int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23); int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67); int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23); int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67); #else const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01)); const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23)); int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23); const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45)); const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67)); int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67); const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01)); const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23)); int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23); const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45)); const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67)); int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67); #endif float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123); float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567); float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123); float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567); const float32x4_t vscale0123 = vld1q_f32(w); w = (const float*) w + 4; vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123); vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale0123); const float32x4_t vscale4567 = vld1q_f32(w); w = (const float*) w + 4; vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567); vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale4567); const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias); vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias)); vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias)); vacc1x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc1x0123, vmagic_bias)); vacc1x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc1x4567, vmagic_bias)); const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point); vacc0x0123 = vqsubq_s32(vacc0x0123, vmagic_bias_less_output_zero_point); vacc0x4567 = vqsubq_s32(vacc0x4567, vmagic_bias_less_output_zero_point); vacc1x0123 = vqsubq_s32(vacc1x0123, vmagic_bias_less_output_zero_point); vacc1x4567 = vqsubq_s32(vacc1x4567, vmagic_bias_less_output_zero_point); #if XNN_ARCH_ARM64 int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567); int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567); int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567); #else int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)); int16x8_t vacc1x01234567 = 
vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)); int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567)); #endif const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neon.output_min); vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min); const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neon.output_max); vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max); if (nc >= 8) { vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567)); vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567)); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); nc -= 8; } else { // Final case where not all of the 8 columns fit in the destination. if (nc & 4) { vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4; vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4; vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4); } if (nc & 2) { vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2; vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2; vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2); } if (nc & 1) { vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0); vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8); } nc = 0; } } while (nc != 0); }
file_length: 13,458 | avg_line_length: 46.390845 | max_line_length: 125 | extension_type: c
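The neon-mlal file above targets plain ARMv7 NEON, which lacks vcvtnq_s32_f32, so its fp32_neon path rounds through a "magic bias": adding 2^23 + 2^22 to a float in range forces the rounded integer into the low mantissa bits, and a single saturating subtract then removes the bias and folds in the output zero point. A self-contained sketch of the trick; the constants mirror the usual XNNPACK parameter setup but are stated here as assumptions.

#include <stdint.h>
#include <string.h>

static int32_t round_via_magic_bias(float x, int32_t output_zero_point) {
  const float magic_bias = 12582912.0f;  /* 2^23 + 2^22, bit pattern 0x4B400000 */
  const float biased = x + magic_bias;   /* valid while |x| < 2^22 */
  int32_t bits;
  memcpy(&bits, &biased, sizeof(bits));  /* well-defined type pun in C */
  /* The kernel precomputes this difference as
   * magic_bias_less_output_zero_point and applies it with vqsubq_s32. */
  return bits - (0x4B400000 - output_zero_point);
}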
repo: XNNPACK
file: XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-2x8c4s2-minmax-fp32-neonv8-mlal.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/c4-neon-mull-shuffle.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/gemm.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/math.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 2); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr != 2) { a1 = a0; c1 = c0; } kc = round_up_po2(kc, 8 * sizeof(int8_t)); do { int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2; int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2; int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2; int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2; int32x4_t vacc1x01 = vacc0x01; int32x4_t vacc1x23 = vacc0x23; int32x4_t vacc1x45 = vacc0x45; int32x4_t vacc1x67 = vacc0x67; size_t k = kc; while (k >= 16 * sizeof(int8_t)) { int8x8_t va0x0 = vld1_s8(a0); a0 += 8; int8x8_t va0x1 = vld1_s8(a0); a0 += 8; int8x8_t va1x0 = vld1_s8(a1); a1 += 8; int8x8_t va1x1 = vld1_s8(a1); a1 += 8; const int8x8_t vb01c0x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb23c0x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb45c0x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb67c0x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb01c1x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb23c1x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb45c1x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb67c1x0 = vld1_s8(w); w = (const int8_t*) w + 8; int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0); int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0); const int8x8_t vb01c0x1 = vld1_s8(w); w = (const int8_t*) w + 8; vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0x1); vprod1x01c0 = vmlal_s8(vprod1x01c0, vb01c0x1, va1x1); vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0); vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0); int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0); int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0); const int8x8_t vb23c0x1 = vld1_s8(w); w = (const int8_t*) w + 8; vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0x1); vprod1x23c0 = vmlal_s8(vprod1x23c0, vb23c0x1, va1x1); vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0); vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0); int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0); int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0); const int8x8_t vb45c0x1 = vld1_s8(w); w = (const int8_t*) w + 8; vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0x1); vprod1x45c0 = vmlal_s8(vprod1x45c0, vb45c0x1, va1x1); vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0); vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0); int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0); int16x8_t 
vprod1x67c0 = vmull_s8(vb67c0x0, va1x0); const int8x8_t vb67c0x1 = vld1_s8(w); w = (const int8_t*) w + 8; vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0x1); vprod1x67c0 = vmlal_s8(vprod1x67c0, vb67c0x1, va1x1); vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0); vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0); va0x0 = vext_s8(va0x0, va0x0, 4); va0x1 = vext_s8(va0x1, va0x1, 4); va1x0 = vext_s8(va1x0, va1x0, 4); va1x1 = vext_s8(va1x1, va1x1, 4); int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0); int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0); const int8x8_t vb01c1x1 = vld1_s8(w); w = (const int8_t*) w + 8; vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0x1); vprod1x01c1 = vmlal_s8(vprod1x01c1, vb01c1x1, va1x1); vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1); vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1); int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0); int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0); const int8x8_t vb23c1x1 = vld1_s8(w); w = (const int8_t*) w + 8; vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0x1); vprod1x23c1 = vmlal_s8(vprod1x23c1, vb23c1x1, va1x1); vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1); vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1); int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0); int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0); const int8x8_t vb45c1x1 = vld1_s8(w); w = (const int8_t*) w + 8; vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0x1); vprod1x45c1 = vmlal_s8(vprod1x45c1, vb45c1x1, va1x1); vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1); vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1); int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0); int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0); const int8x8_t vb67c1x1 = vld1_s8(w); w = (const int8_t*) w + 8; vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0x1); vprod1x67c1 = vmlal_s8(vprod1x67c1, vb67c1x1, va1x1); vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1); vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1); k -= 16 * sizeof(int8_t); } if (k != 0) { int8x8_t va0x0 = vld1_s8(a0); a0 += 8; int8x8_t va1x0 = vld1_s8(a1); a1 += 8; const int8x8_t vb01c0x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb23c0x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb45c0x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb67c0x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb01c1x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb23c1x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb45c1x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb67c1x0 = vld1_s8(w); w = (const int8_t*) w + 8; int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0); int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0); vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0); vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0); int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0); int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0); vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0); vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0); int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0); int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0); vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0); vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0); int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0); int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0); vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0); vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0); va0x0 = vext_s8(va0x0, va0x0, 4); va1x0 = vext_s8(va1x0, va1x0, 4); int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0); int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0); vacc0x01 = vpadalq_s16(vacc0x01, 
vprod0x01c1); vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1); int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0); int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0); vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1); vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1); int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0); int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0); vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1); vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1); int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0); int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0); vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1); vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1); } #if XNN_ARCH_ARM64 int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23); int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67); int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23); int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67); #else const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01)); const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23)); int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23); const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45)); const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67)); int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67); const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01)); const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23)); int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23); const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45)); const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67)); int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67); #endif float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123); float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567); float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123); float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567); const float32x4_t vscale0123 = vld1q_f32(w); w = (const float*) w + 4; vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123); vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale0123); const float32x4_t vscale4567 = vld1q_f32(w); w = (const float*) w + 4; vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567); vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale4567); vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123); vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567); vacc1x0123 = vcvtnq_s32_f32(vfpacc1x0123); vacc1x4567 = vcvtnq_s32_f32(vfpacc1x4567); const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point); #if XNN_ARCH_ARM64 int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567); int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567); vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point); vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point); int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567); #else int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)); int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)); vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point); vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point); int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567)); #endif const int8x16_t voutput_min = 
vld1q_dup_s8(&params->fp32_neonv8.output_min); vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min); const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neonv8.output_max); vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max); if (nc >= 8) { vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567)); vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567)); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); nc -= 8; } else { // Final case where not all of the 8 columns fit in the destination. if (nc & 4) { vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4; vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4; vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4); } if (nc & 2) { vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2; vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2; vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2); } if (nc & 1) { vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0); vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8); } nc = 0; } } while (nc != 0); }
file_length: 13,236 | avg_line_length: 45.773852 | max_line_length: 110 | extension_type: c
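In the c4s2 ("shuffle") variants above, every 8-byte load of activations carries two 4-deep slices of k. The kernel multiplies the register as loaded against the c0 weight block, then rotates it by four bytes with vext_s8(va, va, 4) so the other slice lines up with the interleaved c1 weight block. A scalar model of that rotation, with illustrative names:

#include <stdint.h>
#include <string.h>

/* Swap the two 4-byte halves of an 8-byte chunk, as vext_s8(va, va, 4)
 * does for the int8x8_t registers in the kernels above. */
static void rotate_halves(int8_t va[8]) {
  int8_t tmp[8];
  memcpy(tmp, va + 4, 4);
  memcpy(tmp + 4, va, 4);
  memcpy(va, tmp, 8);
}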
repo: XNNPACK
file: XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-2x8c8-minmax-fp32-avx2.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/MRx8c8-avx2.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <immintrin.h> #include <xnnpack/gemm.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/math.h> #include <xnnpack/unaligned.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_2x8c8__avx2( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 2); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 8 * sizeof(int8_t)); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr != 2) { a1 = a0; c1 = c0; } do { const __m128i vbias0x0 = _mm_cvtsi32_si128(((const int*) w)[0]); const __m128i vbias0x1 = _mm_cvtsi32_si128(((const int*) w)[1]); __m256i vacc0x01 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x0), vbias0x1, 1); const __m128i vbias0x2 = _mm_cvtsi32_si128(((const int*) w)[2]); const __m128i vbias0x3 = _mm_cvtsi32_si128(((const int*) w)[3]); __m256i vacc0x23 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x2), vbias0x3, 1); const __m128i vbias0x4 = _mm_cvtsi32_si128(((const int*) w)[4]); const __m128i vbias0x5 = _mm_cvtsi32_si128(((const int*) w)[5]); __m256i vacc0x45 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x4), vbias0x5, 1); const __m128i vbias0x6 = _mm_cvtsi32_si128(((const int*) w)[6]); const __m128i vbias0x7 = _mm_cvtsi32_si128(((const int*) w)[7]); __m256i vacc0x67 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x6), vbias0x7, 1); __m256i vacc1x01 = vacc0x01; __m256i vacc1x23 = vacc0x23; __m256i vacc1x45 = vacc0x45; __m256i vacc1x67 = vacc0x67; w = (const int32_t*) w + 8; size_t k = 0; while (k < kc) { const __m128i va0 = _mm_broadcastq_epi64(_mm_loadl_epi64((const __m128i*) a0)); const __m256i vxa0 = _mm256_cvtepi8_epi16(va0); a0 += 8; const __m128i va1 = _mm_broadcastq_epi64(_mm_loadl_epi64((const __m128i*) a1)); const __m256i vxa1 = _mm256_cvtepi8_epi16(va1); a1 += 8; const __m128i vb01 = _mm_load_si128((const __m128i*) w); const __m256i vxb01 = _mm256_cvtepi8_epi16(vb01); vacc0x01 = _mm256_add_epi32(vacc0x01, _mm256_madd_epi16(vxa0, vxb01)); vacc1x01 = _mm256_add_epi32(vacc1x01, _mm256_madd_epi16(vxa1, vxb01)); const __m128i vb23 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 16)); const __m256i vxb23 = _mm256_cvtepi8_epi16(vb23); vacc0x23 = _mm256_add_epi32(vacc0x23, _mm256_madd_epi16(vxa0, vxb23)); vacc1x23 = _mm256_add_epi32(vacc1x23, _mm256_madd_epi16(vxa1, vxb23)); const __m128i vb45 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 32)); const __m256i vxb45 = _mm256_cvtepi8_epi16(vb45); vacc0x45 = _mm256_add_epi32(vacc0x45, _mm256_madd_epi16(vxa0, vxb45)); vacc1x45 = _mm256_add_epi32(vacc1x45, _mm256_madd_epi16(vxa1, vxb45)); const __m128i vb67 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 48)); const __m256i vxb67 = _mm256_cvtepi8_epi16(vb67); vacc0x67 = _mm256_add_epi32(vacc0x67, _mm256_madd_epi16(vxa0, vxb67)); vacc1x67 = _mm256_add_epi32(vacc1x67, _mm256_madd_epi16(vxa1, 
vxb67)); w = (const void*) ((const int8_t*) w + 64); k += 8 * sizeof(int8_t); } const __m256i vacc0x0213 = _mm256_hadd_epi32(vacc0x01, vacc0x23); const __m256i vacc0x4657 = _mm256_hadd_epi32(vacc0x45, vacc0x67); const __m256i vacc1x0213 = _mm256_hadd_epi32(vacc1x01, vacc1x23); const __m256i vacc1x4657 = _mm256_hadd_epi32(vacc1x45, vacc1x67); const __m256i vacc0x02461357 = _mm256_hadd_epi32(vacc0x0213, vacc0x4657); const __m256i vacc1x02461357 = _mm256_hadd_epi32(vacc1x0213, vacc1x4657); const __m256i vpermute_mask = _mm256_set_epi32(7, 3, 6, 2, 5, 1, 4, 0); __m256i vacc0x01234567 = _mm256_permutevar8x32_epi32(vacc0x02461357, vpermute_mask); __m256i vacc1x01234567 = _mm256_permutevar8x32_epi32(vacc1x02461357, vpermute_mask); __m256 vscaled0x01234567 = _mm256_cvtepi32_ps(vacc0x01234567); __m256 vscaled1x01234567 = _mm256_cvtepi32_ps(vacc1x01234567); const __m256 vscale01234567 = _mm256_load_ps(w); w = (const void*) ((const float*) w + 8); vscaled0x01234567 = _mm256_mul_ps(vscaled0x01234567, vscale01234567); vscaled1x01234567 = _mm256_mul_ps(vscaled1x01234567, vscale01234567); const __m256 voutput_max_less_zero_point = _mm256_load_ps(params->fp32_avx2.output_max_less_zero_point); vscaled0x01234567 = _mm256_min_ps(vscaled0x01234567, voutput_max_less_zero_point); vscaled1x01234567 = _mm256_min_ps(vscaled1x01234567, voutput_max_less_zero_point); vacc0x01234567 = _mm256_cvtps_epi32(vscaled0x01234567); vacc1x01234567 = _mm256_cvtps_epi32(vscaled1x01234567); const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->fp32_avx2.output_zero_point); __m256i vacc01x01234567 = _mm256_adds_epi16(_mm256_packs_epi32(vacc0x01234567, vacc1x01234567), voutput_zero_point); vacc01x01234567 = _mm256_permute4x64_epi64(vacc01x01234567, _MM_SHUFFLE(3, 1, 2, 0)); __m256i vout = _mm256_packs_epi16(vacc01x01234567, vacc01x01234567); vout = _mm256_max_epi8(vout, _mm256_load_si256((const __m256i*) params->fp32_avx2.output_min)); __m128i vout_lo = _mm256_castsi256_si128(vout); __m128i vout_hi = _mm256_extracti128_si256(vout, 1); if (nc >= 8) { _mm_storel_epi64((__m128i*) c0, vout_lo); _mm_storel_epi64((__m128i*) c1, vout_hi); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); nc -= 8; } else { if (nc & 4) { _mm_storeu_si32(c0, vout_lo); _mm_storeu_si32(c1, vout_hi); c0 += 4; c1 += 4; vout_lo = _mm_srli_epi64(vout_lo, 32); vout_hi = _mm_srli_epi64(vout_hi, 32); } if (nc & 2) { unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout_lo, 0)); unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout_hi, 0)); c0 += 2; c1 += 2; vout_lo = _mm_srli_epi32(vout_lo, 16); vout_hi = _mm_srli_epi32(vout_hi, 16); } if (nc & 1) { *c0 = (int8_t) _mm_extract_epi8(vout_lo, 0); *c1 = (int8_t) _mm_extract_epi8(vout_hi, 0); } nc = 0; } } while (nc != 0); }
file_length: 7,115 | avg_line_length: 37.673913 | max_line_length: 120 | extension_type: c
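The AVX2 kernel's inner loop sign-extends 8 int8 activations and 8 int8 weights to int16 and leans on _mm256_madd_epi16, which multiplies lanes elementwise and adds adjacent pairs into int32; the _mm256_hadd_epi32 and permute sequence afterwards finishes the horizontal sums. A scalar model of one 128-bit lane of madd (the 256-bit form simply repeats it in each lane):

#include <stdint.h>

static void madd_epi16_model(const int16_t x[8], const int16_t y[8],
                             int32_t out[4]) {
  for (int i = 0; i < 4; i++) {
    out[i] = (int32_t) x[2*i] * y[2*i] + (int32_t) x[2*i + 1] * y[2*i + 1];
  }
}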
repo: XNNPACK
file: XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-2x8c8-minmax-fp32-neon-mlal.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/c8-neon-mull.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_2x8c8__neon_mlal( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 2); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 8 * sizeof(int8_t)); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr != 2) { a1 = a0; c1 = c0; } do { int32x4_t vacc0x0 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t)); int32x4_t vacc0x1 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t)); int32x4_t vacc0x2 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t)); int32x4_t vacc0x3 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t)); int32x4_t vacc0x4 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t)); int32x4_t vacc0x5 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t)); int32x4_t vacc0x6 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t)); int32x4_t vacc0x7 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t)); int32x4_t vacc1x0 = vacc0x0; int32x4_t vacc1x1 = vacc0x1; int32x4_t vacc1x2 = vacc0x2; int32x4_t vacc1x3 = vacc0x3; int32x4_t vacc1x4 = vacc0x4; int32x4_t vacc1x5 = vacc0x5; int32x4_t vacc1x6 = vacc0x6; int32x4_t vacc1x7 = vacc0x7; size_t k = kc; // 2x partial unrolled loop to load 16 bytes at a time using MLA. 
while (k >= 16 * sizeof(int8_t)) { const int8x8_t va0x0 = vld1_s8(a0); a0 += 8; const int8x8_t va0x1 = vld1_s8(a0); a0 += 8; const int8x8_t va1x0 = vld1_s8(a1); a1 += 8; const int8x8_t va1x1 = vld1_s8(a1); a1 += 8; const int8x8_t vb0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t)); const int8x8_t vb1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t)); const int8x8_t vb2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t)); const int8x8_t vb3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t)); const int8x8_t vb4x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t)); const int8x8_t vb5x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t)); const int8x8_t vb6x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t)); const int8x8_t vb7x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t)); const int8x8_t vb0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t)); int16x8_t vprod0x0 = vmull_s8(vb0x0, va0x0); int16x8_t vprod1x0 = vmull_s8(vb0x0, va1x0); vprod0x0 = vmlal_s8(vprod0x0, vb0x1, va0x1); vprod1x0 = vmlal_s8(vprod1x0, vb0x1, va1x1); vacc0x0 = vpadalq_s16(vacc0x0, vprod0x0); vacc1x0 = vpadalq_s16(vacc1x0, vprod1x0); const int8x8_t vb1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t)); int16x8_t vprod0x1 = vmull_s8(vb1x0, va0x0); int16x8_t vprod1x1 = vmull_s8(vb1x0, va1x0); vprod0x1 = vmlal_s8(vprod0x1, vb1x1, va0x1); vprod1x1 = vmlal_s8(vprod1x1, vb1x1, va1x1); vacc0x1 = vpadalq_s16(vacc0x1, vprod0x1); vacc1x1 = vpadalq_s16(vacc1x1, vprod1x1); const int8x8_t vb2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t)); int16x8_t vprod0x2 = vmull_s8(vb2x0, va0x0); int16x8_t vprod1x2 = vmull_s8(vb2x0, va1x0); vprod0x2 = vmlal_s8(vprod0x2, vb2x1, va0x1); vprod1x2 = vmlal_s8(vprod1x2, vb2x1, va1x1); vacc0x2 = vpadalq_s16(vacc0x2, vprod0x2); vacc1x2 = vpadalq_s16(vacc1x2, vprod1x2); const int8x8_t vb3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t)); int16x8_t vprod0x3 = vmull_s8(vb3x0, va0x0); int16x8_t vprod1x3 = vmull_s8(vb3x0, va1x0); vprod0x3 = vmlal_s8(vprod0x3, vb3x1, va0x1); vprod1x3 = vmlal_s8(vprod1x3, vb3x1, va1x1); vacc0x3 = vpadalq_s16(vacc0x3, vprod0x3); vacc1x3 = vpadalq_s16(vacc1x3, vprod1x3); const int8x8_t vb4x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t)); int16x8_t vprod0x4 = vmull_s8(vb4x0, va0x0); int16x8_t vprod1x4 = vmull_s8(vb4x0, va1x0); vprod0x4 = vmlal_s8(vprod0x4, vb4x1, va0x1); vprod1x4 = vmlal_s8(vprod1x4, vb4x1, va1x1); vacc0x4 = vpadalq_s16(vacc0x4, vprod0x4); vacc1x4 = vpadalq_s16(vacc1x4, vprod1x4); const int8x8_t vb5x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t)); int16x8_t vprod0x5 = vmull_s8(vb5x0, va0x0); int16x8_t vprod1x5 = vmull_s8(vb5x0, va1x0); vprod0x5 = vmlal_s8(vprod0x5, vb5x1, va0x1); vprod1x5 = vmlal_s8(vprod1x5, vb5x1, va1x1); vacc0x5 = vpadalq_s16(vacc0x5, vprod0x5); vacc1x5 = vpadalq_s16(vacc1x5, vprod1x5); const int8x8_t vb6x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t)); int16x8_t vprod0x6 = vmull_s8(vb6x0, va0x0); int16x8_t vprod1x6 = vmull_s8(vb6x0, va1x0); vprod0x6 = vmlal_s8(vprod0x6, vb6x1, va0x1); vprod1x6 = vmlal_s8(vprod1x6, vb6x1, va1x1); vacc0x6 = vpadalq_s16(vacc0x6, vprod0x6); vacc1x6 = vpadalq_s16(vacc1x6, vprod1x6); const int8x8_t vb7x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t)); int16x8_t vprod0x7 
= vmull_s8(vb7x0, va0x0); int16x8_t vprod1x7 = vmull_s8(vb7x0, va1x0); vprod0x7 = vmlal_s8(vprod0x7, vb7x1, va0x1); vprod1x7 = vmlal_s8(vprod1x7, vb7x1, va1x1); vacc0x7 = vpadalq_s16(vacc0x7, vprod0x7); vacc1x7 = vpadalq_s16(vacc1x7, vprod1x7); k -= 16 * sizeof(int8_t); } // Handle 8 bytes at a time using MUL. if (k != 0) { const int8x8_t va0 = vld1_s8(a0); a0 += 8; const int8x8_t va1 = vld1_s8(a1); a1 += 8; const int8x8_t vb0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int16x8_t vprod0x0 = vmull_s8(vb0, va0); const int16x8_t vprod1x0 = vmull_s8(vb0, va1); vacc0x0 = vpadalq_s16(vacc0x0, vprod0x0); vacc1x0 = vpadalq_s16(vacc1x0, vprod1x0); const int8x8_t vb1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int16x8_t vprod0x1 = vmull_s8(vb1, va0); const int16x8_t vprod1x1 = vmull_s8(vb1, va1); vacc0x1 = vpadalq_s16(vacc0x1, vprod0x1); vacc1x1 = vpadalq_s16(vacc1x1, vprod1x1); const int8x8_t vb2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int16x8_t vprod0x2 = vmull_s8(vb2, va0); const int16x8_t vprod1x2 = vmull_s8(vb2, va1); vacc0x2 = vpadalq_s16(vacc0x2, vprod0x2); vacc1x2 = vpadalq_s16(vacc1x2, vprod1x2); const int8x8_t vb3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int16x8_t vprod0x3 = vmull_s8(vb3, va0); const int16x8_t vprod1x3 = vmull_s8(vb3, va1); vacc0x3 = vpadalq_s16(vacc0x3, vprod0x3); vacc1x3 = vpadalq_s16(vacc1x3, vprod1x3); const int8x8_t vb4 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int16x8_t vprod0x4 = vmull_s8(vb4, va0); const int16x8_t vprod1x4 = vmull_s8(vb4, va1); vacc0x4 = vpadalq_s16(vacc0x4, vprod0x4); vacc1x4 = vpadalq_s16(vacc1x4, vprod1x4); const int8x8_t vb5 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int16x8_t vprod0x5 = vmull_s8(vb5, va0); const int16x8_t vprod1x5 = vmull_s8(vb5, va1); vacc0x5 = vpadalq_s16(vacc0x5, vprod0x5); vacc1x5 = vpadalq_s16(vacc1x5, vprod1x5); const int8x8_t vb6 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int16x8_t vprod0x6 = vmull_s8(vb6, va0); const int16x8_t vprod1x6 = vmull_s8(vb6, va1); vacc0x6 = vpadalq_s16(vacc0x6, vprod0x6); vacc1x6 = vpadalq_s16(vacc1x6, vprod1x6); const int8x8_t vb7 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int16x8_t vprod0x7 = vmull_s8(vb7, va0); const int16x8_t vprod1x7 = vmull_s8(vb7, va1); vacc0x7 = vpadalq_s16(vacc0x7, vprod0x7); vacc1x7 = vpadalq_s16(vacc1x7, vprod1x7); k -= 8 * sizeof(int8_t); } #if XNN_ARCH_ARM64 const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1); const int32x4_t vsum0x23 = vpaddq_s32(vacc0x2, vacc0x3); const int32x4_t vsum0x45 = vpaddq_s32(vacc0x4, vacc0x5); const int32x4_t vsum0x67 = vpaddq_s32(vacc0x6, vacc0x7); const int32x4_t vsum1x01 = vpaddq_s32(vacc1x0, vacc1x1); const int32x4_t vsum1x23 = vpaddq_s32(vacc1x2, vacc1x3); const int32x4_t vsum1x45 = vpaddq_s32(vacc1x4, vacc1x5); const int32x4_t vsum1x67 = vpaddq_s32(vacc1x6, vacc1x7); int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23); int32x4_t vacc0x4567 = vpaddq_s32(vsum0x45, vsum0x67); int32x4_t vacc1x0123 = vpaddq_s32(vsum1x01, vsum1x23); int32x4_t vacc1x4567 = vpaddq_s32(vsum1x45, vsum1x67); #else const int32x2_t vpsum0x0 = vadd_s32(vget_low_s32(vacc0x0), vget_high_s32(vacc0x0)); const int32x2_t vpsum0x1 = vadd_s32(vget_low_s32(vacc0x1), vget_high_s32(vacc0x1)); const int32x2_t vpsum0x2 = vadd_s32(vget_low_s32(vacc0x2), vget_high_s32(vacc0x2)); const int32x2_t vpsum0x3 = 
vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3)); const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1); const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3); int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23 ); const int32x2_t vpsum0x4 = vadd_s32(vget_low_s32(vacc0x4), vget_high_s32(vacc0x4)); const int32x2_t vpsum0x5 = vadd_s32(vget_low_s32(vacc0x5), vget_high_s32(vacc0x5)); const int32x2_t vpsum0x6 = vadd_s32(vget_low_s32(vacc0x6), vget_high_s32(vacc0x6)); const int32x2_t vpsum0x7 = vadd_s32(vget_low_s32(vacc0x7), vget_high_s32(vacc0x7)); const int32x2_t vsum0x45 = vpadd_s32(vpsum0x4, vpsum0x5); const int32x2_t vsum0x67 = vpadd_s32(vpsum0x6, vpsum0x7); int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67 ); const int32x2_t vpsum1x0 = vadd_s32(vget_low_s32(vacc1x0), vget_high_s32(vacc1x0)); const int32x2_t vpsum1x1 = vadd_s32(vget_low_s32(vacc1x1), vget_high_s32(vacc1x1)); const int32x2_t vpsum1x2 = vadd_s32(vget_low_s32(vacc1x2), vget_high_s32(vacc1x2)); const int32x2_t vpsum1x3 = vadd_s32(vget_low_s32(vacc1x3), vget_high_s32(vacc1x3)); const int32x2_t vsum1x01 = vpadd_s32(vpsum1x0, vpsum1x1); const int32x2_t vsum1x23 = vpadd_s32(vpsum1x2, vpsum1x3); int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23 ); const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4)); const int32x2_t vpsum1x5 = vadd_s32(vget_low_s32(vacc1x5), vget_high_s32(vacc1x5)); const int32x2_t vpsum1x6 = vadd_s32(vget_low_s32(vacc1x6), vget_high_s32(vacc1x6)); const int32x2_t vpsum1x7 = vadd_s32(vget_low_s32(vacc1x7), vget_high_s32(vacc1x7)); const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5); const int32x2_t vsum1x67 = vpadd_s32(vpsum1x6, vpsum1x7); int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67 ); #endif float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123); float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567); float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123); float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567); const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4); vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123); vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale0123); const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4); vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567); vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale4567); const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias); vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias)); vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias)); vacc1x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc1x0123, vmagic_bias)); vacc1x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc1x4567, vmagic_bias)); const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point); vacc0x0123 = vqsubq_s32(vacc0x0123, vmagic_bias_less_output_zero_point); vacc0x4567 = vqsubq_s32(vacc0x4567, vmagic_bias_less_output_zero_point); vacc1x0123 = vqsubq_s32(vacc1x0123, vmagic_bias_less_output_zero_point); vacc1x4567 = vqsubq_s32(vacc1x4567, vmagic_bias_less_output_zero_point); #if XNN_ARCH_ARM64 int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567); int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567); int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567); #else int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), 
vqmovn_s32(vacc0x4567)); int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)); int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567)); #endif const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neon.output_min); vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min); const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neon.output_max); vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max); if (nc >= 8) { vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567)); vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567)); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); nc -= 8; } else { // Final case where not all of the 8 columns fit in the destination. if (nc & 4) { vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4; vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4; vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4); } if (nc & 2) { vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2; vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2; vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2); } if (nc & 1) { vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0); vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8); } nc = 0; } } while (nc != 0); }
file length (chars): 15,935
avg line length: 49.751592
max line length: 125
extension: c
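
Side note for readers of these kernels: the MLAL main loops above all follow the same vmull_s8 / vmlal_s8 / vpadalq_s16 widening pattern. Below is a minimal, self-contained sketch of that pattern (not part of XNNPACK; the array contents and expected sum are illustrative), reduced to a single 16-element dot product. It needs an ARM NEON target (AArch64, or -mfpu=neon on 32-bit ARM).

#include <arm_neon.h>
#include <stdio.h>

int main(void) {
  const int8_t a[16] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
  const int8_t b[16] = {1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2};
  const int8x8_t va0 = vld1_s8(a);      // first 8 bytes of the k dimension
  const int8x8_t va1 = vld1_s8(a + 8);  // second 8 bytes
  const int8x8_t vb0 = vld1_s8(b);
  const int8x8_t vb1 = vld1_s8(b + 8);
  // int8 x int8 -> int16 products; the two 8-byte blocks are fused with MLAL.
  int16x8_t vprod = vmull_s8(vb0, va0);
  vprod = vmlal_s8(vprod, vb1, va1);
  // Pairwise-widen int16 -> int32 and accumulate, as vpadalq_s16 does above.
  int32x4_t vacc = vpadalq_s16(vdupq_n_s32(0), vprod);
  // Horizontal sum of the four lanes, for checking.
  const int32x2_t vsum = vadd_s32(vget_low_s32(vacc), vget_high_s32(vacc));
  printf("dot = %d\n", vget_lane_s32(vpadd_s32(vsum, vsum), 0));  // prints 236
  return 0;
}
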
repo: XNNPACK
file: XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-2x8c8-minmax-fp32-neonv8-mlal.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/c8-neon-mull.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/gemm.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/math.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_2x8c8__neonv8_mlal( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 2); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 8 * sizeof(int8_t)); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr != 2) { a1 = a0; c1 = c0; } do { int32x4_t vacc0x0 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t)); int32x4_t vacc0x1 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t)); int32x4_t vacc0x2 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t)); int32x4_t vacc0x3 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t)); int32x4_t vacc0x4 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t)); int32x4_t vacc0x5 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t)); int32x4_t vacc0x6 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t)); int32x4_t vacc0x7 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t)); int32x4_t vacc1x0 = vacc0x0; int32x4_t vacc1x1 = vacc0x1; int32x4_t vacc1x2 = vacc0x2; int32x4_t vacc1x3 = vacc0x3; int32x4_t vacc1x4 = vacc0x4; int32x4_t vacc1x5 = vacc0x5; int32x4_t vacc1x6 = vacc0x6; int32x4_t vacc1x7 = vacc0x7; size_t k = kc; // 2x partial unrolled loop to load 16 bytes at a time using MLA. 
while (k >= 16 * sizeof(int8_t)) { const int8x8_t va0x0 = vld1_s8(a0); a0 += 8; const int8x8_t va0x1 = vld1_s8(a0); a0 += 8; const int8x8_t va1x0 = vld1_s8(a1); a1 += 8; const int8x8_t va1x1 = vld1_s8(a1); a1 += 8; const int8x8_t vb0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t)); const int8x8_t vb1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t)); const int8x8_t vb2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t)); const int8x8_t vb3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t)); const int8x8_t vb4x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t)); const int8x8_t vb5x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t)); const int8x8_t vb6x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t)); const int8x8_t vb7x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t)); const int8x8_t vb0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t)); int16x8_t vprod0x0 = vmull_s8(vb0x0, va0x0); int16x8_t vprod1x0 = vmull_s8(vb0x0, va1x0); vprod0x0 = vmlal_s8(vprod0x0, vb0x1, va0x1); vprod1x0 = vmlal_s8(vprod1x0, vb0x1, va1x1); vacc0x0 = vpadalq_s16(vacc0x0, vprod0x0); vacc1x0 = vpadalq_s16(vacc1x0, vprod1x0); const int8x8_t vb1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t)); int16x8_t vprod0x1 = vmull_s8(vb1x0, va0x0); int16x8_t vprod1x1 = vmull_s8(vb1x0, va1x0); vprod0x1 = vmlal_s8(vprod0x1, vb1x1, va0x1); vprod1x1 = vmlal_s8(vprod1x1, vb1x1, va1x1); vacc0x1 = vpadalq_s16(vacc0x1, vprod0x1); vacc1x1 = vpadalq_s16(vacc1x1, vprod1x1); const int8x8_t vb2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t)); int16x8_t vprod0x2 = vmull_s8(vb2x0, va0x0); int16x8_t vprod1x2 = vmull_s8(vb2x0, va1x0); vprod0x2 = vmlal_s8(vprod0x2, vb2x1, va0x1); vprod1x2 = vmlal_s8(vprod1x2, vb2x1, va1x1); vacc0x2 = vpadalq_s16(vacc0x2, vprod0x2); vacc1x2 = vpadalq_s16(vacc1x2, vprod1x2); const int8x8_t vb3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t)); int16x8_t vprod0x3 = vmull_s8(vb3x0, va0x0); int16x8_t vprod1x3 = vmull_s8(vb3x0, va1x0); vprod0x3 = vmlal_s8(vprod0x3, vb3x1, va0x1); vprod1x3 = vmlal_s8(vprod1x3, vb3x1, va1x1); vacc0x3 = vpadalq_s16(vacc0x3, vprod0x3); vacc1x3 = vpadalq_s16(vacc1x3, vprod1x3); const int8x8_t vb4x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t)); int16x8_t vprod0x4 = vmull_s8(vb4x0, va0x0); int16x8_t vprod1x4 = vmull_s8(vb4x0, va1x0); vprod0x4 = vmlal_s8(vprod0x4, vb4x1, va0x1); vprod1x4 = vmlal_s8(vprod1x4, vb4x1, va1x1); vacc0x4 = vpadalq_s16(vacc0x4, vprod0x4); vacc1x4 = vpadalq_s16(vacc1x4, vprod1x4); const int8x8_t vb5x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t)); int16x8_t vprod0x5 = vmull_s8(vb5x0, va0x0); int16x8_t vprod1x5 = vmull_s8(vb5x0, va1x0); vprod0x5 = vmlal_s8(vprod0x5, vb5x1, va0x1); vprod1x5 = vmlal_s8(vprod1x5, vb5x1, va1x1); vacc0x5 = vpadalq_s16(vacc0x5, vprod0x5); vacc1x5 = vpadalq_s16(vacc1x5, vprod1x5); const int8x8_t vb6x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t)); int16x8_t vprod0x6 = vmull_s8(vb6x0, va0x0); int16x8_t vprod1x6 = vmull_s8(vb6x0, va1x0); vprod0x6 = vmlal_s8(vprod0x6, vb6x1, va0x1); vprod1x6 = vmlal_s8(vprod1x6, vb6x1, va1x1); vacc0x6 = vpadalq_s16(vacc0x6, vprod0x6); vacc1x6 = vpadalq_s16(vacc1x6, vprod1x6); const int8x8_t vb7x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t)); int16x8_t vprod0x7 
= vmull_s8(vb7x0, va0x0); int16x8_t vprod1x7 = vmull_s8(vb7x0, va1x0); vprod0x7 = vmlal_s8(vprod0x7, vb7x1, va0x1); vprod1x7 = vmlal_s8(vprod1x7, vb7x1, va1x1); vacc0x7 = vpadalq_s16(vacc0x7, vprod0x7); vacc1x7 = vpadalq_s16(vacc1x7, vprod1x7); k -= 16 * sizeof(int8_t); } // Handle 8 bytes at a time using MUL. if (k != 0) { const int8x8_t va0 = vld1_s8(a0); a0 += 8; const int8x8_t va1 = vld1_s8(a1); a1 += 8; const int8x8_t vb0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int16x8_t vprod0x0 = vmull_s8(vb0, va0); const int16x8_t vprod1x0 = vmull_s8(vb0, va1); vacc0x0 = vpadalq_s16(vacc0x0, vprod0x0); vacc1x0 = vpadalq_s16(vacc1x0, vprod1x0); const int8x8_t vb1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int16x8_t vprod0x1 = vmull_s8(vb1, va0); const int16x8_t vprod1x1 = vmull_s8(vb1, va1); vacc0x1 = vpadalq_s16(vacc0x1, vprod0x1); vacc1x1 = vpadalq_s16(vacc1x1, vprod1x1); const int8x8_t vb2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int16x8_t vprod0x2 = vmull_s8(vb2, va0); const int16x8_t vprod1x2 = vmull_s8(vb2, va1); vacc0x2 = vpadalq_s16(vacc0x2, vprod0x2); vacc1x2 = vpadalq_s16(vacc1x2, vprod1x2); const int8x8_t vb3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int16x8_t vprod0x3 = vmull_s8(vb3, va0); const int16x8_t vprod1x3 = vmull_s8(vb3, va1); vacc0x3 = vpadalq_s16(vacc0x3, vprod0x3); vacc1x3 = vpadalq_s16(vacc1x3, vprod1x3); const int8x8_t vb4 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int16x8_t vprod0x4 = vmull_s8(vb4, va0); const int16x8_t vprod1x4 = vmull_s8(vb4, va1); vacc0x4 = vpadalq_s16(vacc0x4, vprod0x4); vacc1x4 = vpadalq_s16(vacc1x4, vprod1x4); const int8x8_t vb5 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int16x8_t vprod0x5 = vmull_s8(vb5, va0); const int16x8_t vprod1x5 = vmull_s8(vb5, va1); vacc0x5 = vpadalq_s16(vacc0x5, vprod0x5); vacc1x5 = vpadalq_s16(vacc1x5, vprod1x5); const int8x8_t vb6 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int16x8_t vprod0x6 = vmull_s8(vb6, va0); const int16x8_t vprod1x6 = vmull_s8(vb6, va1); vacc0x6 = vpadalq_s16(vacc0x6, vprod0x6); vacc1x6 = vpadalq_s16(vacc1x6, vprod1x6); const int8x8_t vb7 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int16x8_t vprod0x7 = vmull_s8(vb7, va0); const int16x8_t vprod1x7 = vmull_s8(vb7, va1); vacc0x7 = vpadalq_s16(vacc0x7, vprod0x7); vacc1x7 = vpadalq_s16(vacc1x7, vprod1x7); k -= 8 * sizeof(int8_t); } #if XNN_ARCH_ARM64 const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1); const int32x4_t vsum0x23 = vpaddq_s32(vacc0x2, vacc0x3); const int32x4_t vsum0x45 = vpaddq_s32(vacc0x4, vacc0x5); const int32x4_t vsum0x67 = vpaddq_s32(vacc0x6, vacc0x7); const int32x4_t vsum1x01 = vpaddq_s32(vacc1x0, vacc1x1); const int32x4_t vsum1x23 = vpaddq_s32(vacc1x2, vacc1x3); const int32x4_t vsum1x45 = vpaddq_s32(vacc1x4, vacc1x5); const int32x4_t vsum1x67 = vpaddq_s32(vacc1x6, vacc1x7); int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23); int32x4_t vacc0x4567 = vpaddq_s32(vsum0x45, vsum0x67); int32x4_t vacc1x0123 = vpaddq_s32(vsum1x01, vsum1x23); int32x4_t vacc1x4567 = vpaddq_s32(vsum1x45, vsum1x67); #else const int32x2_t vpsum0x0 = vadd_s32(vget_low_s32(vacc0x0), vget_high_s32(vacc0x0)); const int32x2_t vpsum0x1 = vadd_s32(vget_low_s32(vacc0x1), vget_high_s32(vacc0x1)); const int32x2_t vpsum0x2 = vadd_s32(vget_low_s32(vacc0x2), vget_high_s32(vacc0x2)); const int32x2_t vpsum0x3 = 
vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3)); const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1); const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3); int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23 ); const int32x2_t vpsum0x4 = vadd_s32(vget_low_s32(vacc0x4), vget_high_s32(vacc0x4)); const int32x2_t vpsum0x5 = vadd_s32(vget_low_s32(vacc0x5), vget_high_s32(vacc0x5)); const int32x2_t vpsum0x6 = vadd_s32(vget_low_s32(vacc0x6), vget_high_s32(vacc0x6)); const int32x2_t vpsum0x7 = vadd_s32(vget_low_s32(vacc0x7), vget_high_s32(vacc0x7)); const int32x2_t vsum0x45 = vpadd_s32(vpsum0x4, vpsum0x5); const int32x2_t vsum0x67 = vpadd_s32(vpsum0x6, vpsum0x7); int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67 ); const int32x2_t vpsum1x0 = vadd_s32(vget_low_s32(vacc1x0), vget_high_s32(vacc1x0)); const int32x2_t vpsum1x1 = vadd_s32(vget_low_s32(vacc1x1), vget_high_s32(vacc1x1)); const int32x2_t vpsum1x2 = vadd_s32(vget_low_s32(vacc1x2), vget_high_s32(vacc1x2)); const int32x2_t vpsum1x3 = vadd_s32(vget_low_s32(vacc1x3), vget_high_s32(vacc1x3)); const int32x2_t vsum1x01 = vpadd_s32(vpsum1x0, vpsum1x1); const int32x2_t vsum1x23 = vpadd_s32(vpsum1x2, vpsum1x3); int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23 ); const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4)); const int32x2_t vpsum1x5 = vadd_s32(vget_low_s32(vacc1x5), vget_high_s32(vacc1x5)); const int32x2_t vpsum1x6 = vadd_s32(vget_low_s32(vacc1x6), vget_high_s32(vacc1x6)); const int32x2_t vpsum1x7 = vadd_s32(vget_low_s32(vacc1x7), vget_high_s32(vacc1x7)); const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5); const int32x2_t vsum1x67 = vpadd_s32(vpsum1x6, vpsum1x7); int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67 ); #endif float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123); float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567); float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123); float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567); const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4); vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123); vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale0123); const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4); vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567); vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale4567); vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123); vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567); vacc1x0123 = vcvtnq_s32_f32(vfpacc1x0123); vacc1x4567 = vcvtnq_s32_f32(vfpacc1x4567); const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point); #if XNN_ARCH_ARM64 int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567); int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567); vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point); vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point); int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567); #else int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)); int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)); vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point); vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point); int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567)); #endif const int8x16_t voutput_min = 
vld1q_dup_s8(&params->fp32_neonv8.output_min); vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min); const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neonv8.output_max); vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max); if (nc >= 8) { vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567)); vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567)); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); nc -= 8; } else { // Final case where not all of the 8 columns fit in the destination. if (nc & 4) { vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4; vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4; vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4); } if (nc & 2) { vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2; vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2; vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2); } if (nc & 1) { vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0); vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8); } nc = 0; } } while (nc != 0); }
file length (chars): 15,713
avg line length: 49.204473
max line length: 114
extension: c
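
The #if XNN_ARCH_ARM64 / #else split in the kernel above only changes how the eight per-column accumulators are reduced. A minimal standalone sketch of the portable fallback (illustrative values; not XNNPACK code) shows that the vadd_s32 + vpadd_s32 + vcombine_s32 sequence leaves one int32 lane per output column, which is exactly what two rounds of vpaddq_s32 produce on AArch64:

#include <arm_neon.h>
#include <stdio.h>

int main(void) {
  // Stand-ins for vacc0x0..vacc0x3: four partial sums per output column.
  const int32_t d0[4] = {1, 2, 3, 4};      // column 0, total 10
  const int32_t d1[4] = {10, 20, 30, 40};  // column 1, total 100
  const int32_t d2[4] = {5, 5, 5, 5};      // column 2, total 20
  const int32_t d3[4] = {7, 0, 0, 0};      // column 3, total 7
  const int32x4_t vacc0 = vld1q_s32(d0);
  const int32x4_t vacc1 = vld1q_s32(d1);
  const int32x4_t vacc2 = vld1q_s32(d2);
  const int32x4_t vacc3 = vld1q_s32(d3);
  // Fold each accumulator in half, then pairwise-add across accumulators.
  const int32x2_t vpsum0 = vadd_s32(vget_low_s32(vacc0), vget_high_s32(vacc0));
  const int32x2_t vpsum1 = vadd_s32(vget_low_s32(vacc1), vget_high_s32(vacc1));
  const int32x2_t vpsum2 = vadd_s32(vget_low_s32(vacc2), vget_high_s32(vacc2));
  const int32x2_t vpsum3 = vadd_s32(vget_low_s32(vacc3), vget_high_s32(vacc3));
  const int32x2_t vsum01 = vpadd_s32(vpsum0, vpsum1);
  const int32x2_t vsum23 = vpadd_s32(vpsum2, vpsum3);
  const int32x4_t vacc0123 = vcombine_s32(vsum01, vsum23);
  int32_t out[4];
  vst1q_s32(out, vacc0123);
  printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]);  // 10 100 20 7
  return 0;
}
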
repo: XNNPACK
file: XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-2x8c8-xw-minmax-fp32-avx2.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/MRx8c8-avx2.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <immintrin.h> #include <xnnpack/gemm.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/math.h> #include <xnnpack/unaligned.h> void xnn_qs8_qc8w_gemm_xw_minmax_fp32_ukernel_2x8c8__avx2( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 2); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 8 * sizeof(int8_t)); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr != 2) { a1 = a0; c1 = c0; } do { const __m128i vbias0x0 = _mm_cvtsi32_si128(((const int*) w)[0]); const __m128i vbias0x1 = _mm_cvtsi32_si128(((const int*) w)[1]); __m256i vacc0x01 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x0), vbias0x1, 1); const __m128i vbias0x2 = _mm_cvtsi32_si128(((const int*) w)[2]); const __m128i vbias0x3 = _mm_cvtsi32_si128(((const int*) w)[3]); __m256i vacc0x23 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x2), vbias0x3, 1); const __m128i vbias0x4 = _mm_cvtsi32_si128(((const int*) w)[4]); const __m128i vbias0x5 = _mm_cvtsi32_si128(((const int*) w)[5]); __m256i vacc0x45 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x4), vbias0x5, 1); const __m128i vbias0x6 = _mm_cvtsi32_si128(((const int*) w)[6]); const __m128i vbias0x7 = _mm_cvtsi32_si128(((const int*) w)[7]); __m256i vacc0x67 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x6), vbias0x7, 1); __m256i vacc1x01 = vacc0x01; __m256i vacc1x23 = vacc0x23; __m256i vacc1x45 = vacc0x45; __m256i vacc1x67 = vacc0x67; w = (const int32_t*) w + 8; size_t k = 0; while (k < kc) { const __m128i va0 = _mm_broadcastq_epi64(_mm_loadl_epi64((const __m128i*) a0)); const __m256i vxa0 = _mm256_cvtepi8_epi16(va0); a0 += 8; const __m128i va1 = _mm_broadcastq_epi64(_mm_loadl_epi64((const __m128i*) a1)); const __m256i vxa1 = _mm256_cvtepi8_epi16(va1); a1 += 8; const __m256i vxb01 = _mm256_load_si256((const __m256i*) w); vacc0x01 = _mm256_add_epi32(vacc0x01, _mm256_madd_epi16(vxa0, vxb01)); vacc1x01 = _mm256_add_epi32(vacc1x01, _mm256_madd_epi16(vxa1, vxb01)); const __m256i vxb23 = _mm256_load_si256((const __m256i*) ((const int16_t*) w + 16)); vacc0x23 = _mm256_add_epi32(vacc0x23, _mm256_madd_epi16(vxa0, vxb23)); vacc1x23 = _mm256_add_epi32(vacc1x23, _mm256_madd_epi16(vxa1, vxb23)); const __m256i vxb45 = _mm256_load_si256((const __m256i*) ((const int16_t*) w + 32)); vacc0x45 = _mm256_add_epi32(vacc0x45, _mm256_madd_epi16(vxa0, vxb45)); vacc1x45 = _mm256_add_epi32(vacc1x45, _mm256_madd_epi16(vxa1, vxb45)); const __m256i vxb67 = _mm256_load_si256((const __m256i*) ((const int16_t*) w + 48)); vacc0x67 = _mm256_add_epi32(vacc0x67, _mm256_madd_epi16(vxa0, vxb67)); vacc1x67 = _mm256_add_epi32(vacc1x67, _mm256_madd_epi16(vxa1, vxb67)); w = (const void*) ((const int16_t*) w + 64); k += 8 * sizeof(int8_t); } const __m256i vacc0x0213 = _mm256_hadd_epi32(vacc0x01, vacc0x23); const __m256i vacc0x4657 = 
_mm256_hadd_epi32(vacc0x45, vacc0x67); const __m256i vacc1x0213 = _mm256_hadd_epi32(vacc1x01, vacc1x23); const __m256i vacc1x4657 = _mm256_hadd_epi32(vacc1x45, vacc1x67); const __m256i vacc0x02461357 = _mm256_hadd_epi32(vacc0x0213, vacc0x4657); const __m256i vacc1x02461357 = _mm256_hadd_epi32(vacc1x0213, vacc1x4657); const __m256i vpermute_mask = _mm256_set_epi32(7, 3, 6, 2, 5, 1, 4, 0); __m256i vacc0x01234567 = _mm256_permutevar8x32_epi32(vacc0x02461357, vpermute_mask); __m256i vacc1x01234567 = _mm256_permutevar8x32_epi32(vacc1x02461357, vpermute_mask); __m256 vscaled0x01234567 = _mm256_cvtepi32_ps(vacc0x01234567); __m256 vscaled1x01234567 = _mm256_cvtepi32_ps(vacc1x01234567); const __m256 vscale01234567 = _mm256_load_ps(w); w = (const void*) ((const float*) w + 8); vscaled0x01234567 = _mm256_mul_ps(vscaled0x01234567, vscale01234567); vscaled1x01234567 = _mm256_mul_ps(vscaled1x01234567, vscale01234567); const __m256 voutput_max_less_zero_point = _mm256_load_ps(params->fp32_avx2.output_max_less_zero_point); vscaled0x01234567 = _mm256_min_ps(vscaled0x01234567, voutput_max_less_zero_point); vscaled1x01234567 = _mm256_min_ps(vscaled1x01234567, voutput_max_less_zero_point); vacc0x01234567 = _mm256_cvtps_epi32(vscaled0x01234567); vacc1x01234567 = _mm256_cvtps_epi32(vscaled1x01234567); const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->fp32_avx2.output_zero_point); __m256i vacc01x01234567 = _mm256_adds_epi16(_mm256_packs_epi32(vacc0x01234567, vacc1x01234567), voutput_zero_point); vacc01x01234567 = _mm256_permute4x64_epi64(vacc01x01234567, _MM_SHUFFLE(3, 1, 2, 0)); __m256i vout = _mm256_packs_epi16(vacc01x01234567, vacc01x01234567); vout = _mm256_max_epi8(vout, _mm256_load_si256((const __m256i*) params->fp32_avx2.output_min)); __m128i vout_lo = _mm256_castsi256_si128(vout); __m128i vout_hi = _mm256_extracti128_si256(vout, 1); if (nc >= 8) { _mm_storel_epi64((__m128i*) c0, vout_lo); _mm_storel_epi64((__m128i*) c1, vout_hi); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); nc -= 8; } else { if (nc & 4) { _mm_storeu_si32(c0, vout_lo); _mm_storeu_si32(c1, vout_hi); c0 += 4; c1 += 4; vout_lo = _mm_srli_epi64(vout_lo, 32); vout_hi = _mm_srli_epi64(vout_hi, 32); } if (nc & 2) { unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout_lo, 0)); unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout_hi, 0)); c0 += 2; c1 += 2; vout_lo = _mm_srli_epi32(vout_lo, 16); vout_hi = _mm_srli_epi32(vout_hi, 16); } if (nc & 1) { *c0 = (int8_t) _mm_extract_epi8(vout_lo, 0); *c1 = (int8_t) _mm_extract_epi8(vout_hi, 0); } nc = 0; } } while (nc != 0); }
file length (chars): 6,914
avg line length: 37.416667
max line length: 120
extension: c
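
The hadd/permute epilogue in the AVX2 kernel above is easy to misread because _mm256_hadd_epi32 adds horizontally only within each 128-bit lane. A minimal sketch (illustrative values; not XNNPACK code) showing that two hadd rounds leave the column sums in 0 2 4 6 1 3 5 7 order, which the _mm256_set_epi32(7, 3, 6, 2, 5, 1, 4, 0) permutation restores. Compile with -mavx2.

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  // As in the kernel: column 2k's partials live in the low 128-bit lane,
  // column 2k+1's in the high lane. Column n's partials here sum to n+1.
  const __m256i vacc01 = _mm256_set_epi32(0, 0, 0, 2, 0, 0, 0, 1);
  const __m256i vacc23 = _mm256_set_epi32(0, 0, 0, 4, 0, 0, 0, 3);
  const __m256i vacc45 = _mm256_set_epi32(0, 0, 0, 6, 0, 0, 0, 5);
  const __m256i vacc67 = _mm256_set_epi32(0, 0, 0, 8, 0, 0, 0, 7);

  const __m256i vacc0213 = _mm256_hadd_epi32(vacc01, vacc23);
  const __m256i vacc4657 = _mm256_hadd_epi32(vacc45, vacc67);
  const __m256i vacc02461357 = _mm256_hadd_epi32(vacc0213, vacc4657);

  const __m256i vpermute_mask = _mm256_set_epi32(7, 3, 6, 2, 5, 1, 4, 0);
  const __m256i vacc01234567 =
      _mm256_permutevar8x32_epi32(vacc02461357, vpermute_mask);

  int32_t out[8];
  _mm256_storeu_si256((__m256i*) out, vacc01234567);
  for (int i = 0; i < 8; i++) {
    printf("%d ", out[i]);  // prints: 1 2 3 4 5 6 7 8
  }
  printf("\n");
  return 0;
}
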
repo: XNNPACK
file: XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-3x2-minmax-fp32-scalar-fmagic.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/scalar.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> #include <xnnpack/unaligned.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_3x2__scalar_fmagic( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } do { int32_t vacc0x0 = unaligned_indexed_load_s32(w, 0); int32_t vacc0x1 = unaligned_indexed_load_s32(w, 1); int32_t vacc1x0 = vacc0x0; int32_t vacc1x1 = vacc0x1; int32_t vacc2x0 = vacc0x0; int32_t vacc2x1 = vacc0x1; w = (const int32_t*) w + 2; size_t k = kc; do { const int32_t va0 = (int32_t) *a0++; const int32_t va1 = (int32_t) *a1++; const int32_t va2 = (int32_t) *a2++; const int32_t vb0 = (int32_t) ((const int8_t*) w)[0]; const int32_t vb1 = (int32_t) ((const int8_t*) w)[1]; w = (const int8_t*) w + 2; vacc0x0 += va0 * vb0; vacc0x1 += va0 * vb1; vacc1x0 += va1 * vb0; vacc1x1 += va1 * vb1; vacc2x0 += va2 * vb0; vacc2x1 += va2 * vb1; k -= sizeof(int8_t); } while (k != 0); float vfpacc0x0 = (float) vacc0x0; float vfpacc0x1 = (float) vacc0x1; float vfpacc1x0 = (float) vacc1x0; float vfpacc1x1 = (float) vacc1x1; float vfpacc2x0 = (float) vacc2x0; float vfpacc2x1 = (float) vacc2x1; const float vscale0 = unaligned_indexed_load_f32(w, 0); vfpacc0x0 *= vscale0; vfpacc1x0 *= vscale0; vfpacc2x0 *= vscale0; const float vscale1 = unaligned_indexed_load_f32(w, 1); vfpacc0x1 *= vscale1; vfpacc1x1 *= vscale1; vfpacc2x1 *= vscale1; w = (const void*) ((const float*) w + 2); const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point; vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point); vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point); vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point); vfpacc1x1 = math_max_f32(vfpacc1x1, voutput_min_less_zero_point); vfpacc2x0 = math_max_f32(vfpacc2x0, voutput_min_less_zero_point); vfpacc2x1 = math_max_f32(vfpacc2x1, voutput_min_less_zero_point); const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point; vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point); vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point); vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point); vfpacc1x1 = math_min_f32(vfpacc1x1, voutput_max_less_zero_point); vfpacc2x0 = math_min_f32(vfpacc2x0, voutput_max_less_zero_point); vfpacc2x1 = math_min_f32(vfpacc2x1, voutput_max_less_zero_point); const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias; vfpacc0x0 += vmagic_bias; vfpacc0x1 += vmagic_bias; vfpacc1x0 += vmagic_bias; vfpacc1x1 += vmagic_bias; vfpacc2x0 += vmagic_bias; vfpacc2x1 += vmagic_bias; const int32_t vmagic_bias_less_output_zero_point = 
params->fp32_scalar_fmagic.magic_bias_less_output_zero_point; int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0) - vmagic_bias_less_output_zero_point; int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1) - vmagic_bias_less_output_zero_point; int32_t vout1x0 = (int32_t) float_as_uint32(vfpacc1x0) - vmagic_bias_less_output_zero_point; int32_t vout1x1 = (int32_t) float_as_uint32(vfpacc1x1) - vmagic_bias_less_output_zero_point; int32_t vout2x0 = (int32_t) float_as_uint32(vfpacc2x0) - vmagic_bias_less_output_zero_point; int32_t vout2x1 = (int32_t) float_as_uint32(vfpacc2x1) - vmagic_bias_less_output_zero_point; if XNN_LIKELY(nc >= 2) { c0[0] = (int8_t) vout0x0; c0[1] = (int8_t) vout0x1; c1[0] = (int8_t) vout1x0; c1[1] = (int8_t) vout1x1; c2[0] = (int8_t) vout2x0; c2[1] = (int8_t) vout2x1; a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); nc -= 2; } else { if (nc & 1) { c0[0] = (int8_t) vout0x0; c1[0] = (int8_t) vout1x0; c2[0] = (int8_t) vout2x0; } nc = 0; } } while (nc != 0); }
file length (chars): 5,299
avg line length: 33.193548
max line length: 116
extension: c
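
The "fmagic" suffix refers to the magic-bias float-to-int rounding used in the kernel above. A standalone sketch (constants chosen for illustration; XNNPACK computes them in its params setup, not here): adding 2^23 + 2^22 = 12582912.0f pushes the value into a float range whose unit in the last place is exactly 1, so the hardware's round-to-nearest-even happens during the add and the rounded integer can be read straight out of the bit pattern.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t float_as_uint32(float f) {
  uint32_t u;
  memcpy(&u, &f, sizeof(u));  // bit-exact reinterpretation, no strict-aliasing UB
  return u;
}

int main(void) {
  const float magic_bias = 12582912.0f;   // 0x1.8p+23
  const int32_t output_zero_point = -1;   // example value
  const int32_t magic_bias_less_output_zero_point =
      (int32_t) float_as_uint32(magic_bias) - output_zero_point;

  const float inputs[] = {-2.5f, -0.49f, 0.5f, 1.49f, 100.75f};
  for (int i = 0; i < 5; i++) {
    const float vfpacc = inputs[i] + magic_bias;  // rounding happens here
    const int32_t vout = (int32_t) float_as_uint32(vfpacc)
                       - magic_bias_less_output_zero_point;
    // Prints round-to-nearest-even(x) + zero point, e.g. -2.5 -> -3.
    printf("%+8.2f -> %d\n", inputs[i], vout);
  }
  return 0;
}
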
repo: XNNPACK
file: XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-3x2-minmax-fp32-scalar-imagic.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/scalar.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> #include <xnnpack/unaligned.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_3x2__scalar_imagic( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } do { int32_t vacc0x0 = unaligned_indexed_load_s32(w, 0); int32_t vacc0x1 = unaligned_indexed_load_s32(w, 1); int32_t vacc1x0 = vacc0x0; int32_t vacc1x1 = vacc0x1; int32_t vacc2x0 = vacc0x0; int32_t vacc2x1 = vacc0x1; w = (const int32_t*) w + 2; size_t k = kc; do { const int32_t va0 = (int32_t) *a0++; const int32_t va1 = (int32_t) *a1++; const int32_t va2 = (int32_t) *a2++; const int32_t vb0 = (int32_t) ((const int8_t*) w)[0]; const int32_t vb1 = (int32_t) ((const int8_t*) w)[1]; w = (const int8_t*) w + 2; vacc0x0 += va0 * vb0; vacc0x1 += va0 * vb1; vacc1x0 += va1 * vb0; vacc1x1 += va1 * vb1; vacc2x0 += va2 * vb0; vacc2x1 += va2 * vb1; k -= sizeof(int8_t); } while (k != 0); float vfpacc0x0 = (float) vacc0x0; float vfpacc0x1 = (float) vacc0x1; float vfpacc1x0 = (float) vacc1x0; float vfpacc1x1 = (float) vacc1x1; float vfpacc2x0 = (float) vacc2x0; float vfpacc2x1 = (float) vacc2x1; const float vscale0 = unaligned_indexed_load_f32(w, 0); vfpacc0x0 *= vscale0; vfpacc1x0 *= vscale0; vfpacc2x0 *= vscale0; const float vscale1 = unaligned_indexed_load_f32(w, 1); vfpacc0x1 *= vscale1; vfpacc1x1 *= vscale1; vfpacc2x1 *= vscale1; w = (const void*) ((const float*) w + 2); const float vmagic_bias = params->fp32_scalar_imagic.magic_bias; vfpacc0x0 += vmagic_bias; vfpacc0x1 += vmagic_bias; vfpacc1x0 += vmagic_bias; vfpacc1x1 += vmagic_bias; vfpacc2x0 += vmagic_bias; vfpacc2x1 += vmagic_bias; int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0); int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1); int32_t vout1x0 = (int32_t) float_as_uint32(vfpacc1x0); int32_t vout1x1 = (int32_t) float_as_uint32(vfpacc1x1); int32_t vout2x0 = (int32_t) float_as_uint32(vfpacc2x0); int32_t vout2x1 = (int32_t) float_as_uint32(vfpacc2x1); const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min; vout0x0 = math_max_s32(vout0x0, vmagic_min); vout0x1 = math_max_s32(vout0x1, vmagic_min); vout1x0 = math_max_s32(vout1x0, vmagic_min); vout1x1 = math_max_s32(vout1x1, vmagic_min); vout2x0 = math_max_s32(vout2x0, vmagic_min); vout2x1 = math_max_s32(vout2x1, vmagic_min); const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max; vout0x0 = math_min_s32(vout0x0, vmagic_max); vout0x1 = math_min_s32(vout0x1, vmagic_max); vout1x0 = math_min_s32(vout1x0, vmagic_max); vout1x1 = math_min_s32(vout1x1, vmagic_max); vout2x0 = math_min_s32(vout2x0, vmagic_max); vout2x1 = math_min_s32(vout2x1, vmagic_max); const int32_t 
vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point; vout0x0 -= vmagic_bias_less_zero_point; vout0x1 -= vmagic_bias_less_zero_point; vout1x0 -= vmagic_bias_less_zero_point; vout1x1 -= vmagic_bias_less_zero_point; vout2x0 -= vmagic_bias_less_zero_point; vout2x1 -= vmagic_bias_less_zero_point; if XNN_LIKELY(nc >= 2) { c0[0] = (int8_t) vout0x0; c0[1] = (int8_t) vout0x1; c1[0] = (int8_t) vout1x0; c1[1] = (int8_t) vout1x1; c2[0] = (int8_t) vout2x0; c2[1] = (int8_t) vout2x1; a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); nc -= 2; } else { if (nc & 1) { c0[0] = (int8_t) vout0x0; c1[0] = (int8_t) vout1x0; c2[0] = (int8_t) vout2x0; } nc = 0; } } while (nc != 0); }
file length (chars): 5,012
avg line length: 29.944444
max line length: 102
extension: c
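
The "imagic" variant above differs from "fmagic" only in where clamping happens: because the magic-biased bit pattern increases monotonically with the value over the relevant range, the min/max can be applied to the raw bits, before the zero point is restored, replacing the float-side clamps. A sketch under assumed constants (the derivation of magic_min/magic_max mirrors how the kernel uses them, but is not copied from XNNPACK's setup code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t float_as_uint32(float f) {
  uint32_t u;
  memcpy(&u, &f, sizeof(u));
  return u;
}

static int32_t s32_max(int32_t a, int32_t b) { return a > b ? a : b; }
static int32_t s32_min(int32_t a, int32_t b) { return a < b ? a : b; }

int main(void) {
  const float magic_bias = 12582912.0f;
  const int32_t zero_point = -1, qmin = -128, qmax = 127;  // example values
  // Clamp bounds expressed directly as magic-biased bit patterns.
  const int32_t magic_min =
      (int32_t) float_as_uint32((float) (qmin - zero_point) + magic_bias);
  const int32_t magic_max =
      (int32_t) float_as_uint32((float) (qmax - zero_point) + magic_bias);
  const int32_t magic_bias_less_zero_point =
      (int32_t) float_as_uint32(magic_bias) - zero_point;

  const float inputs[] = {-1000.0f, -3.4f, 0.6f, 500.0f};
  for (int i = 0; i < 4; i++) {
    int32_t vout = (int32_t) float_as_uint32(inputs[i] + magic_bias);
    vout = s32_max(vout, magic_min);   // integer-domain clamp
    vout = s32_min(vout, magic_max);
    vout -= magic_bias_less_zero_point;
    printf("%+9.1f -> %d\n", inputs[i], vout);  // -128, -4, 0, 127
  }
  return 0;
}
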
repo: XNNPACK
file: XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-3x2-minmax-fp32-scalar-lrintf.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/scalar.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <math.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> #include <xnnpack/unaligned.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_3x2__scalar_lrintf( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } do { int32_t vacc0x0 = unaligned_indexed_load_s32(w, 0); int32_t vacc0x1 = unaligned_indexed_load_s32(w, 1); int32_t vacc1x0 = vacc0x0; int32_t vacc1x1 = vacc0x1; int32_t vacc2x0 = vacc0x0; int32_t vacc2x1 = vacc0x1; w = (const int32_t*) w + 2; size_t k = kc; do { const int32_t va0 = (int32_t) *a0++; const int32_t va1 = (int32_t) *a1++; const int32_t va2 = (int32_t) *a2++; const int32_t vb0 = (int32_t) ((const int8_t*) w)[0]; const int32_t vb1 = (int32_t) ((const int8_t*) w)[1]; w = (const int8_t*) w + 2; vacc0x0 += va0 * vb0; vacc0x1 += va0 * vb1; vacc1x0 += va1 * vb0; vacc1x1 += va1 * vb1; vacc2x0 += va2 * vb0; vacc2x1 += va2 * vb1; k -= sizeof(int8_t); } while (k != 0); float vfpacc0x0 = (float) vacc0x0; float vfpacc0x1 = (float) vacc0x1; float vfpacc1x0 = (float) vacc1x0; float vfpacc1x1 = (float) vacc1x1; float vfpacc2x0 = (float) vacc2x0; float vfpacc2x1 = (float) vacc2x1; const float vscale0 = unaligned_indexed_load_f32(w, 0); vfpacc0x0 *= vscale0; vfpacc1x0 *= vscale0; vfpacc2x0 *= vscale0; const float vscale1 = unaligned_indexed_load_f32(w, 1); vfpacc0x1 *= vscale1; vfpacc1x1 *= vscale1; vfpacc2x1 *= vscale1; w = (const void*) ((const float*) w + 2); const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point; vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point); vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point); vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point); vfpacc1x1 = math_max_f32(vfpacc1x1, voutput_min_less_zero_point); vfpacc2x0 = math_max_f32(vfpacc2x0, voutput_min_less_zero_point); vfpacc2x1 = math_max_f32(vfpacc2x1, voutput_min_less_zero_point); const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point; vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point); vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point); vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point); vfpacc1x1 = math_min_f32(vfpacc1x1, voutput_max_less_zero_point); vfpacc2x0 = math_min_f32(vfpacc2x0, voutput_max_less_zero_point); vfpacc2x1 = math_min_f32(vfpacc2x1, voutput_max_less_zero_point); const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0); const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1); const int32_t vrndacc1x0 = (int32_t) lrintf(vfpacc1x0); const int32_t vrndacc1x1 = (int32_t) lrintf(vfpacc1x1); const int32_t vrndacc2x0 = 
(int32_t) lrintf(vfpacc2x0); const int32_t vrndacc2x1 = (int32_t) lrintf(vfpacc2x1); const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point; int32_t vout0x0 = vrndacc0x0 + voutput_zero_point; int32_t vout0x1 = vrndacc0x1 + voutput_zero_point; int32_t vout1x0 = vrndacc1x0 + voutput_zero_point; int32_t vout1x1 = vrndacc1x1 + voutput_zero_point; int32_t vout2x0 = vrndacc2x0 + voutput_zero_point; int32_t vout2x1 = vrndacc2x1 + voutput_zero_point; if XNN_LIKELY(nc >= 2) { c0[0] = (int8_t) vout0x0; c0[1] = (int8_t) vout0x1; c1[0] = (int8_t) vout1x0; c1[1] = (int8_t) vout1x1; c2[0] = (int8_t) vout2x0; c2[1] = (int8_t) vout2x1; a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); nc -= 2; } else { if (nc & 1) { c0[0] = (int8_t) vout0x0; c1[0] = (int8_t) vout1x0; c2[0] = (int8_t) vout2x0; } nc = 0; } } while (nc != 0); }
file length (chars): 5,144
avg line length: 32.193548
max line length: 100
extension: c
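
The "lrintf" variant above rounds with the C library instead of bit tricks; under the default FE_TONEAREST rounding mode, lrintf rounds ties to even, matching vcvtnq_s32_f32 on NEONv8 and _mm256_cvtps_epi32 on AVX2. A minimal sketch with assumed clamp bounds and zero point:

#include <math.h>
#include <stdio.h>

int main(void) {
  const float output_min_less_zero_point = -129.0f;  // example: qmin - zp
  const float output_max_less_zero_point = 126.0f;   // example: qmax - zp
  const int output_zero_point = 1;                   // example value

  const float inputs[] = {-2.5f, -1.5f, 0.5f, 2.5f, 300.0f};
  for (int i = 0; i < 5; i++) {
    float vfpacc = inputs[i];
    vfpacc = fmaxf(vfpacc, output_min_less_zero_point);  // clamp in float
    vfpacc = fminf(vfpacc, output_max_less_zero_point);
    const long vrndacc = lrintf(vfpacc);  // ties to even: -2.5 -> -2, 0.5 -> 0
    printf("%+7.1f -> %ld\n", inputs[i], vrndacc + output_zero_point);
  }
  return 0;
}
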
repo: XNNPACK
file: XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-3x2-minmax-fp32-wasm-fmagic.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/scalar.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> #include <xnnpack/unaligned.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_3x2__wasm_fmagic( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } do { int32_t vacc0x0 = unaligned_indexed_load_s32(w, 0); int32_t vacc0x1 = unaligned_indexed_load_s32(w, 1); int32_t vacc1x0 = vacc0x0; int32_t vacc1x1 = vacc0x1; int32_t vacc2x0 = vacc0x0; int32_t vacc2x1 = vacc0x1; w = (const int32_t*) w + 2; size_t k = kc; do { const int32_t va0 = (int32_t) *a0++; const int32_t va1 = (int32_t) *a1++; const int32_t va2 = (int32_t) *a2++; const int32_t vb0 = (int32_t) ((const int8_t*) w)[0]; const int32_t vb1 = (int32_t) ((const int8_t*) w)[1]; w = (const int8_t*) w + 2; vacc0x0 += va0 * vb0; vacc0x1 += va0 * vb1; vacc1x0 += va1 * vb0; vacc1x1 += va1 * vb1; vacc2x0 += va2 * vb0; vacc2x1 += va2 * vb1; k -= sizeof(int8_t); } while (k != 0); float vfpacc0x0 = (float) vacc0x0; float vfpacc0x1 = (float) vacc0x1; float vfpacc1x0 = (float) vacc1x0; float vfpacc1x1 = (float) vacc1x1; float vfpacc2x0 = (float) vacc2x0; float vfpacc2x1 = (float) vacc2x1; const float vscale0 = unaligned_indexed_load_f32(w, 0); vfpacc0x0 *= vscale0; vfpacc1x0 *= vscale0; vfpacc2x0 *= vscale0; const float vscale1 = unaligned_indexed_load_f32(w, 1); vfpacc0x1 *= vscale1; vfpacc1x1 *= vscale1; vfpacc2x1 *= vscale1; w = (const void*) ((const float*) w + 2); const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point; vfpacc0x0 = __builtin_wasm_max_f32(vfpacc0x0, voutput_min_less_zero_point); vfpacc0x1 = __builtin_wasm_max_f32(vfpacc0x1, voutput_min_less_zero_point); vfpacc1x0 = __builtin_wasm_max_f32(vfpacc1x0, voutput_min_less_zero_point); vfpacc1x1 = __builtin_wasm_max_f32(vfpacc1x1, voutput_min_less_zero_point); vfpacc2x0 = __builtin_wasm_max_f32(vfpacc2x0, voutput_min_less_zero_point); vfpacc2x1 = __builtin_wasm_max_f32(vfpacc2x1, voutput_min_less_zero_point); const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point; vfpacc0x0 = __builtin_wasm_min_f32(vfpacc0x0, voutput_max_less_zero_point); vfpacc0x1 = __builtin_wasm_min_f32(vfpacc0x1, voutput_max_less_zero_point); vfpacc1x0 = __builtin_wasm_min_f32(vfpacc1x0, voutput_max_less_zero_point); vfpacc1x1 = __builtin_wasm_min_f32(vfpacc1x1, voutput_max_less_zero_point); vfpacc2x0 = __builtin_wasm_min_f32(vfpacc2x0, voutput_max_less_zero_point); vfpacc2x1 = __builtin_wasm_min_f32(vfpacc2x1, voutput_max_less_zero_point); const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias; vfpacc0x0 += vmagic_bias; vfpacc0x1 += vmagic_bias; vfpacc1x0 += vmagic_bias; vfpacc1x1 += 
vmagic_bias; vfpacc2x0 += vmagic_bias; vfpacc2x1 += vmagic_bias; const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point; int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0) - vmagic_bias_less_output_zero_point; int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1) - vmagic_bias_less_output_zero_point; int32_t vout1x0 = (int32_t) float_as_uint32(vfpacc1x0) - vmagic_bias_less_output_zero_point; int32_t vout1x1 = (int32_t) float_as_uint32(vfpacc1x1) - vmagic_bias_less_output_zero_point; int32_t vout2x0 = (int32_t) float_as_uint32(vfpacc2x0) - vmagic_bias_less_output_zero_point; int32_t vout2x1 = (int32_t) float_as_uint32(vfpacc2x1) - vmagic_bias_less_output_zero_point; if XNN_LIKELY(nc >= 2) { c0[0] = (int8_t) vout0x0; c0[1] = (int8_t) vout0x1; c1[0] = (int8_t) vout1x0; c1[1] = (int8_t) vout1x1; c2[0] = (int8_t) vout2x0; c2[1] = (int8_t) vout2x1; a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); nc -= 2; } else { if (nc & 1) { c0[0] = (int8_t) vout0x0; c1[0] = (int8_t) vout1x0; c2[0] = (int8_t) vout2x0; } nc = 0; } } while (nc != 0); }
file length (chars): 5,417
avg line length: 33.954839
max line length: 116
extension: c

repo: XNNPACK
file: XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-3x4-minmax-fp32-scalar-fmagic.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/scalar.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_3x4__scalar_fmagic( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } do { int32_t vacc0x0 = ((const int32_t*) w)[0]; int32_t vacc0x1 = ((const int32_t*) w)[1]; int32_t vacc0x2 = ((const int32_t*) w)[2]; int32_t vacc0x3 = ((const int32_t*) w)[3]; int32_t vacc1x0 = vacc0x0; int32_t vacc1x1 = vacc0x1; int32_t vacc1x2 = vacc0x2; int32_t vacc1x3 = vacc0x3; int32_t vacc2x0 = vacc0x0; int32_t vacc2x1 = vacc0x1; int32_t vacc2x2 = vacc0x2; int32_t vacc2x3 = vacc0x3; w = (const int32_t*) w + 4; size_t k = kc; do { const int32_t va0 = (int32_t) *a0++; const int32_t va1 = (int32_t) *a1++; const int32_t va2 = (int32_t) *a2++; const int32_t vb0 = (int32_t) ((const int8_t*) w)[0]; const int32_t vb1 = (int32_t) ((const int8_t*) w)[1]; const int32_t vb2 = (int32_t) ((const int8_t*) w)[2]; const int32_t vb3 = (int32_t) ((const int8_t*) w)[3]; w = (const int8_t*) w + 4; vacc0x0 += va0 * vb0; vacc0x1 += va0 * vb1; vacc0x2 += va0 * vb2; vacc0x3 += va0 * vb3; vacc1x0 += va1 * vb0; vacc1x1 += va1 * vb1; vacc1x2 += va1 * vb2; vacc1x3 += va1 * vb3; vacc2x0 += va2 * vb0; vacc2x1 += va2 * vb1; vacc2x2 += va2 * vb2; vacc2x3 += va2 * vb3; k -= sizeof(int8_t); } while (k != 0); float vfpacc0x0 = (float) vacc0x0; float vfpacc0x1 = (float) vacc0x1; float vfpacc0x2 = (float) vacc0x2; float vfpacc0x3 = (float) vacc0x3; float vfpacc1x0 = (float) vacc1x0; float vfpacc1x1 = (float) vacc1x1; float vfpacc1x2 = (float) vacc1x2; float vfpacc1x3 = (float) vacc1x3; float vfpacc2x0 = (float) vacc2x0; float vfpacc2x1 = (float) vacc2x1; float vfpacc2x2 = (float) vacc2x2; float vfpacc2x3 = (float) vacc2x3; const float vscale0 = ((const float*) w)[0]; vfpacc0x0 *= vscale0; vfpacc1x0 *= vscale0; vfpacc2x0 *= vscale0; const float vscale1 = ((const float*) w)[1]; vfpacc0x1 *= vscale1; vfpacc1x1 *= vscale1; vfpacc2x1 *= vscale1; const float vscale2 = ((const float*) w)[2]; vfpacc0x2 *= vscale2; vfpacc1x2 *= vscale2; vfpacc2x2 *= vscale2; const float vscale3 = ((const float*) w)[3]; vfpacc0x3 *= vscale3; vfpacc1x3 *= vscale3; vfpacc2x3 *= vscale3; w = (const void*) ((const float*) w + 4); const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point; vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point); vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point); vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point); vfpacc0x3 = math_max_f32(vfpacc0x3, voutput_min_less_zero_point); vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point); vfpacc1x1 = math_max_f32(vfpacc1x1, 
voutput_min_less_zero_point); vfpacc1x2 = math_max_f32(vfpacc1x2, voutput_min_less_zero_point); vfpacc1x3 = math_max_f32(vfpacc1x3, voutput_min_less_zero_point); vfpacc2x0 = math_max_f32(vfpacc2x0, voutput_min_less_zero_point); vfpacc2x1 = math_max_f32(vfpacc2x1, voutput_min_less_zero_point); vfpacc2x2 = math_max_f32(vfpacc2x2, voutput_min_less_zero_point); vfpacc2x3 = math_max_f32(vfpacc2x3, voutput_min_less_zero_point); const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point; vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point); vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point); vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point); vfpacc0x3 = math_min_f32(vfpacc0x3, voutput_max_less_zero_point); vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point); vfpacc1x1 = math_min_f32(vfpacc1x1, voutput_max_less_zero_point); vfpacc1x2 = math_min_f32(vfpacc1x2, voutput_max_less_zero_point); vfpacc1x3 = math_min_f32(vfpacc1x3, voutput_max_less_zero_point); vfpacc2x0 = math_min_f32(vfpacc2x0, voutput_max_less_zero_point); vfpacc2x1 = math_min_f32(vfpacc2x1, voutput_max_less_zero_point); vfpacc2x2 = math_min_f32(vfpacc2x2, voutput_max_less_zero_point); vfpacc2x3 = math_min_f32(vfpacc2x3, voutput_max_less_zero_point); const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias; vfpacc0x0 += vmagic_bias; vfpacc0x1 += vmagic_bias; vfpacc0x2 += vmagic_bias; vfpacc0x3 += vmagic_bias; vfpacc1x0 += vmagic_bias; vfpacc1x1 += vmagic_bias; vfpacc1x2 += vmagic_bias; vfpacc1x3 += vmagic_bias; vfpacc2x0 += vmagic_bias; vfpacc2x1 += vmagic_bias; vfpacc2x2 += vmagic_bias; vfpacc2x3 += vmagic_bias; const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point; int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0) - vmagic_bias_less_output_zero_point; int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1) - vmagic_bias_less_output_zero_point; int32_t vout0x2 = (int32_t) float_as_uint32(vfpacc0x2) - vmagic_bias_less_output_zero_point; int32_t vout0x3 = (int32_t) float_as_uint32(vfpacc0x3) - vmagic_bias_less_output_zero_point; int32_t vout1x0 = (int32_t) float_as_uint32(vfpacc1x0) - vmagic_bias_less_output_zero_point; int32_t vout1x1 = (int32_t) float_as_uint32(vfpacc1x1) - vmagic_bias_less_output_zero_point; int32_t vout1x2 = (int32_t) float_as_uint32(vfpacc1x2) - vmagic_bias_less_output_zero_point; int32_t vout1x3 = (int32_t) float_as_uint32(vfpacc1x3) - vmagic_bias_less_output_zero_point; int32_t vout2x0 = (int32_t) float_as_uint32(vfpacc2x0) - vmagic_bias_less_output_zero_point; int32_t vout2x1 = (int32_t) float_as_uint32(vfpacc2x1) - vmagic_bias_less_output_zero_point; int32_t vout2x2 = (int32_t) float_as_uint32(vfpacc2x2) - vmagic_bias_less_output_zero_point; int32_t vout2x3 = (int32_t) float_as_uint32(vfpacc2x3) - vmagic_bias_less_output_zero_point; if XNN_LIKELY(nc >= 4) { c0[0] = (int8_t) vout0x0; c0[1] = (int8_t) vout0x1; c0[2] = (int8_t) vout0x2; c0[3] = (int8_t) vout0x3; c1[0] = (int8_t) vout1x0; c1[1] = (int8_t) vout1x1; c1[2] = (int8_t) vout1x2; c1[3] = (int8_t) vout1x3; c2[0] = (int8_t) vout2x0; c2[1] = (int8_t) vout2x1; c2[2] = (int8_t) vout2x2; c2[3] = (int8_t) vout2x3; a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); nc -= 4; } else { if (nc & 2) 
{ c0[0] = (int8_t) vout0x0; c0[1] = (int8_t) vout0x1; vout0x0 = vout0x2; c0 += 2; c1[0] = (int8_t) vout1x0; c1[1] = (int8_t) vout1x1; vout1x0 = vout1x2; c1 += 2; c2[0] = (int8_t) vout2x0; c2[1] = (int8_t) vout2x1; vout2x0 = vout2x2; c2 += 2; } if (nc & 1) { c0[0] = (int8_t) vout0x0; c1[0] = (int8_t) vout1x0; c2[0] = (int8_t) vout2x0; } nc = 0; } } while (nc != 0); }
file length (chars): 8,380
avg line length: 36.084071
max line length: 116
extension: c

repo: XNNPACK
file: XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-3x4-minmax-fp32-scalar-imagic.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/scalar.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_3x4__scalar_imagic( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } do { int32_t vacc0x0 = ((const int32_t*) w)[0]; int32_t vacc0x1 = ((const int32_t*) w)[1]; int32_t vacc0x2 = ((const int32_t*) w)[2]; int32_t vacc0x3 = ((const int32_t*) w)[3]; int32_t vacc1x0 = vacc0x0; int32_t vacc1x1 = vacc0x1; int32_t vacc1x2 = vacc0x2; int32_t vacc1x3 = vacc0x3; int32_t vacc2x0 = vacc0x0; int32_t vacc2x1 = vacc0x1; int32_t vacc2x2 = vacc0x2; int32_t vacc2x3 = vacc0x3; w = (const int32_t*) w + 4; size_t k = kc; do { const int32_t va0 = (int32_t) *a0++; const int32_t va1 = (int32_t) *a1++; const int32_t va2 = (int32_t) *a2++; const int32_t vb0 = (int32_t) ((const int8_t*) w)[0]; const int32_t vb1 = (int32_t) ((const int8_t*) w)[1]; const int32_t vb2 = (int32_t) ((const int8_t*) w)[2]; const int32_t vb3 = (int32_t) ((const int8_t*) w)[3]; w = (const int8_t*) w + 4; vacc0x0 += va0 * vb0; vacc0x1 += va0 * vb1; vacc0x2 += va0 * vb2; vacc0x3 += va0 * vb3; vacc1x0 += va1 * vb0; vacc1x1 += va1 * vb1; vacc1x2 += va1 * vb2; vacc1x3 += va1 * vb3; vacc2x0 += va2 * vb0; vacc2x1 += va2 * vb1; vacc2x2 += va2 * vb2; vacc2x3 += va2 * vb3; k -= sizeof(int8_t); } while (k != 0); float vfpacc0x0 = (float) vacc0x0; float vfpacc0x1 = (float) vacc0x1; float vfpacc0x2 = (float) vacc0x2; float vfpacc0x3 = (float) vacc0x3; float vfpacc1x0 = (float) vacc1x0; float vfpacc1x1 = (float) vacc1x1; float vfpacc1x2 = (float) vacc1x2; float vfpacc1x3 = (float) vacc1x3; float vfpacc2x0 = (float) vacc2x0; float vfpacc2x1 = (float) vacc2x1; float vfpacc2x2 = (float) vacc2x2; float vfpacc2x3 = (float) vacc2x3; const float vscale0 = ((const float*) w)[0]; vfpacc0x0 *= vscale0; vfpacc1x0 *= vscale0; vfpacc2x0 *= vscale0; const float vscale1 = ((const float*) w)[1]; vfpacc0x1 *= vscale1; vfpacc1x1 *= vscale1; vfpacc2x1 *= vscale1; const float vscale2 = ((const float*) w)[2]; vfpacc0x2 *= vscale2; vfpacc1x2 *= vscale2; vfpacc2x2 *= vscale2; const float vscale3 = ((const float*) w)[3]; vfpacc0x3 *= vscale3; vfpacc1x3 *= vscale3; vfpacc2x3 *= vscale3; w = (const void*) ((const float*) w + 4); const float vmagic_bias = params->fp32_scalar_imagic.magic_bias; vfpacc0x0 += vmagic_bias; vfpacc0x1 += vmagic_bias; vfpacc0x2 += vmagic_bias; vfpacc0x3 += vmagic_bias; vfpacc1x0 += vmagic_bias; vfpacc1x1 += vmagic_bias; vfpacc1x2 += vmagic_bias; vfpacc1x3 += vmagic_bias; vfpacc2x0 += vmagic_bias; vfpacc2x1 += vmagic_bias; vfpacc2x2 += vmagic_bias; vfpacc2x3 += vmagic_bias; int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0); int32_t vout0x1 = (int32_t) 
float_as_uint32(vfpacc0x1); int32_t vout0x2 = (int32_t) float_as_uint32(vfpacc0x2); int32_t vout0x3 = (int32_t) float_as_uint32(vfpacc0x3); int32_t vout1x0 = (int32_t) float_as_uint32(vfpacc1x0); int32_t vout1x1 = (int32_t) float_as_uint32(vfpacc1x1); int32_t vout1x2 = (int32_t) float_as_uint32(vfpacc1x2); int32_t vout1x3 = (int32_t) float_as_uint32(vfpacc1x3); int32_t vout2x0 = (int32_t) float_as_uint32(vfpacc2x0); int32_t vout2x1 = (int32_t) float_as_uint32(vfpacc2x1); int32_t vout2x2 = (int32_t) float_as_uint32(vfpacc2x2); int32_t vout2x3 = (int32_t) float_as_uint32(vfpacc2x3); const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min; vout0x0 = math_max_s32(vout0x0, vmagic_min); vout0x1 = math_max_s32(vout0x1, vmagic_min); vout0x2 = math_max_s32(vout0x2, vmagic_min); vout0x3 = math_max_s32(vout0x3, vmagic_min); vout1x0 = math_max_s32(vout1x0, vmagic_min); vout1x1 = math_max_s32(vout1x1, vmagic_min); vout1x2 = math_max_s32(vout1x2, vmagic_min); vout1x3 = math_max_s32(vout1x3, vmagic_min); vout2x0 = math_max_s32(vout2x0, vmagic_min); vout2x1 = math_max_s32(vout2x1, vmagic_min); vout2x2 = math_max_s32(vout2x2, vmagic_min); vout2x3 = math_max_s32(vout2x3, vmagic_min); const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max; vout0x0 = math_min_s32(vout0x0, vmagic_max); vout0x1 = math_min_s32(vout0x1, vmagic_max); vout0x2 = math_min_s32(vout0x2, vmagic_max); vout0x3 = math_min_s32(vout0x3, vmagic_max); vout1x0 = math_min_s32(vout1x0, vmagic_max); vout1x1 = math_min_s32(vout1x1, vmagic_max); vout1x2 = math_min_s32(vout1x2, vmagic_max); vout1x3 = math_min_s32(vout1x3, vmagic_max); vout2x0 = math_min_s32(vout2x0, vmagic_max); vout2x1 = math_min_s32(vout2x1, vmagic_max); vout2x2 = math_min_s32(vout2x2, vmagic_max); vout2x3 = math_min_s32(vout2x3, vmagic_max); const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point; vout0x0 -= vmagic_bias_less_zero_point; vout0x1 -= vmagic_bias_less_zero_point; vout0x2 -= vmagic_bias_less_zero_point; vout0x3 -= vmagic_bias_less_zero_point; vout1x0 -= vmagic_bias_less_zero_point; vout1x1 -= vmagic_bias_less_zero_point; vout1x2 -= vmagic_bias_less_zero_point; vout1x3 -= vmagic_bias_less_zero_point; vout2x0 -= vmagic_bias_less_zero_point; vout2x1 -= vmagic_bias_less_zero_point; vout2x2 -= vmagic_bias_less_zero_point; vout2x3 -= vmagic_bias_less_zero_point; if XNN_LIKELY(nc >= 4) { c0[0] = (int8_t) vout0x0; c0[1] = (int8_t) vout0x1; c0[2] = (int8_t) vout0x2; c0[3] = (int8_t) vout0x3; c1[0] = (int8_t) vout1x0; c1[1] = (int8_t) vout1x1; c1[2] = (int8_t) vout1x2; c1[3] = (int8_t) vout1x3; c2[0] = (int8_t) vout2x0; c2[1] = (int8_t) vout2x1; c2[2] = (int8_t) vout2x2; c2[3] = (int8_t) vout2x3; a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); nc -= 4; } else { if (nc & 2) { c0[0] = (int8_t) vout0x0; c0[1] = (int8_t) vout0x1; vout0x0 = vout0x2; c0 += 2; c1[0] = (int8_t) vout1x0; c1[1] = (int8_t) vout1x1; vout1x0 = vout1x2; c1 += 2; c2[0] = (int8_t) vout2x0; c2[1] = (int8_t) vout2x1; vout2x0 = vout2x2; c2 += 2; } if (nc & 1) { c0[0] = (int8_t) vout0x0; c1[0] = (int8_t) vout1x0; c2[0] = (int8_t) vout2x0; } nc = 0; } } while (nc != 0); }
file length (chars): 7,883
avg line length: 31.987448
max line length: 102
extension: c

repo: XNNPACK
file: XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-3x4-minmax-fp32-scalar-lrintf.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/scalar.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <math.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_3x4__scalar_lrintf( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } do { int32_t vacc0x0 = ((const int32_t*) w)[0]; int32_t vacc0x1 = ((const int32_t*) w)[1]; int32_t vacc0x2 = ((const int32_t*) w)[2]; int32_t vacc0x3 = ((const int32_t*) w)[3]; int32_t vacc1x0 = vacc0x0; int32_t vacc1x1 = vacc0x1; int32_t vacc1x2 = vacc0x2; int32_t vacc1x3 = vacc0x3; int32_t vacc2x0 = vacc0x0; int32_t vacc2x1 = vacc0x1; int32_t vacc2x2 = vacc0x2; int32_t vacc2x3 = vacc0x3; w = (const int32_t*) w + 4; size_t k = kc; do { const int32_t va0 = (int32_t) *a0++; const int32_t va1 = (int32_t) *a1++; const int32_t va2 = (int32_t) *a2++; const int32_t vb0 = (int32_t) ((const int8_t*) w)[0]; const int32_t vb1 = (int32_t) ((const int8_t*) w)[1]; const int32_t vb2 = (int32_t) ((const int8_t*) w)[2]; const int32_t vb3 = (int32_t) ((const int8_t*) w)[3]; w = (const int8_t*) w + 4; vacc0x0 += va0 * vb0; vacc0x1 += va0 * vb1; vacc0x2 += va0 * vb2; vacc0x3 += va0 * vb3; vacc1x0 += va1 * vb0; vacc1x1 += va1 * vb1; vacc1x2 += va1 * vb2; vacc1x3 += va1 * vb3; vacc2x0 += va2 * vb0; vacc2x1 += va2 * vb1; vacc2x2 += va2 * vb2; vacc2x3 += va2 * vb3; k -= sizeof(int8_t); } while (k != 0); float vfpacc0x0 = (float) vacc0x0; float vfpacc0x1 = (float) vacc0x1; float vfpacc0x2 = (float) vacc0x2; float vfpacc0x3 = (float) vacc0x3; float vfpacc1x0 = (float) vacc1x0; float vfpacc1x1 = (float) vacc1x1; float vfpacc1x2 = (float) vacc1x2; float vfpacc1x3 = (float) vacc1x3; float vfpacc2x0 = (float) vacc2x0; float vfpacc2x1 = (float) vacc2x1; float vfpacc2x2 = (float) vacc2x2; float vfpacc2x3 = (float) vacc2x3; const float vscale0 = ((const float*) w)[0]; vfpacc0x0 *= vscale0; vfpacc1x0 *= vscale0; vfpacc2x0 *= vscale0; const float vscale1 = ((const float*) w)[1]; vfpacc0x1 *= vscale1; vfpacc1x1 *= vscale1; vfpacc2x1 *= vscale1; const float vscale2 = ((const float*) w)[2]; vfpacc0x2 *= vscale2; vfpacc1x2 *= vscale2; vfpacc2x2 *= vscale2; const float vscale3 = ((const float*) w)[3]; vfpacc0x3 *= vscale3; vfpacc1x3 *= vscale3; vfpacc2x3 *= vscale3; w = (const void*) ((const float*) w + 4); const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point; vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point); vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point); vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point); vfpacc0x3 = math_max_f32(vfpacc0x3, voutput_min_less_zero_point); vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point); vfpacc1x1 = 
math_max_f32(vfpacc1x1, voutput_min_less_zero_point); vfpacc1x2 = math_max_f32(vfpacc1x2, voutput_min_less_zero_point); vfpacc1x3 = math_max_f32(vfpacc1x3, voutput_min_less_zero_point); vfpacc2x0 = math_max_f32(vfpacc2x0, voutput_min_less_zero_point); vfpacc2x1 = math_max_f32(vfpacc2x1, voutput_min_less_zero_point); vfpacc2x2 = math_max_f32(vfpacc2x2, voutput_min_less_zero_point); vfpacc2x3 = math_max_f32(vfpacc2x3, voutput_min_less_zero_point); const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point; vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point); vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point); vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point); vfpacc0x3 = math_min_f32(vfpacc0x3, voutput_max_less_zero_point); vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point); vfpacc1x1 = math_min_f32(vfpacc1x1, voutput_max_less_zero_point); vfpacc1x2 = math_min_f32(vfpacc1x2, voutput_max_less_zero_point); vfpacc1x3 = math_min_f32(vfpacc1x3, voutput_max_less_zero_point); vfpacc2x0 = math_min_f32(vfpacc2x0, voutput_max_less_zero_point); vfpacc2x1 = math_min_f32(vfpacc2x1, voutput_max_less_zero_point); vfpacc2x2 = math_min_f32(vfpacc2x2, voutput_max_less_zero_point); vfpacc2x3 = math_min_f32(vfpacc2x3, voutput_max_less_zero_point); const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0); const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1); const int32_t vrndacc0x2 = (int32_t) lrintf(vfpacc0x2); const int32_t vrndacc0x3 = (int32_t) lrintf(vfpacc0x3); const int32_t vrndacc1x0 = (int32_t) lrintf(vfpacc1x0); const int32_t vrndacc1x1 = (int32_t) lrintf(vfpacc1x1); const int32_t vrndacc1x2 = (int32_t) lrintf(vfpacc1x2); const int32_t vrndacc1x3 = (int32_t) lrintf(vfpacc1x3); const int32_t vrndacc2x0 = (int32_t) lrintf(vfpacc2x0); const int32_t vrndacc2x1 = (int32_t) lrintf(vfpacc2x1); const int32_t vrndacc2x2 = (int32_t) lrintf(vfpacc2x2); const int32_t vrndacc2x3 = (int32_t) lrintf(vfpacc2x3); const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point; int32_t vout0x0 = vrndacc0x0 + voutput_zero_point; int32_t vout0x1 = vrndacc0x1 + voutput_zero_point; int32_t vout0x2 = vrndacc0x2 + voutput_zero_point; int32_t vout0x3 = vrndacc0x3 + voutput_zero_point; int32_t vout1x0 = vrndacc1x0 + voutput_zero_point; int32_t vout1x1 = vrndacc1x1 + voutput_zero_point; int32_t vout1x2 = vrndacc1x2 + voutput_zero_point; int32_t vout1x3 = vrndacc1x3 + voutput_zero_point; int32_t vout2x0 = vrndacc2x0 + voutput_zero_point; int32_t vout2x1 = vrndacc2x1 + voutput_zero_point; int32_t vout2x2 = vrndacc2x2 + voutput_zero_point; int32_t vout2x3 = vrndacc2x3 + voutput_zero_point; if XNN_LIKELY(nc >= 4) { c0[0] = (int8_t) vout0x0; c0[1] = (int8_t) vout0x1; c0[2] = (int8_t) vout0x2; c0[3] = (int8_t) vout0x3; c1[0] = (int8_t) vout1x0; c1[1] = (int8_t) vout1x1; c1[2] = (int8_t) vout1x2; c1[3] = (int8_t) vout1x3; c2[0] = (int8_t) vout2x0; c2[1] = (int8_t) vout2x1; c2[2] = (int8_t) vout2x2; c2[3] = (int8_t) vout2x3; a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); nc -= 4; } else { if (nc & 2) { c0[0] = (int8_t) vout0x0; c0[1] = (int8_t) vout0x1; vout0x0 = vout0x2; c0 += 2; c1[0] = (int8_t) vout1x0; c1[1] = (int8_t) vout1x1; vout1x0 = vout1x2; c1 += 2; c2[0] = (int8_t) vout2x0; c2[1] = (int8_t) vout2x1; 
vout2x0 = vout2x2; c2 += 2; } if (nc & 1) { c0[0] = (int8_t) vout0x0; c1[0] = (int8_t) vout1x0; c2[0] = (int8_t) vout2x0; } nc = 0; } } while (nc != 0); }
8,153
35.079646
100
c
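A minimal standalone sketch of the fp32 requantization step that the scalar lrintf kernel above performs per accumulator: clamp in float space against the zero-point-adjusted output bounds, round with lrintf, then add the output zero point. Names and constants here are illustrative, not the XNNPACK parameter layout. Compiles with e.g. cc -O2 demo.c -lm.

#include <math.h>
#include <stdint.h>
#include <stdio.h>

// Illustrative stand-in for the kernel's per-accumulator requantization;
// scale/zero_point/qmin/qmax are hypothetical inputs, not XNNPACK API.
static int8_t requantize_lrintf(int32_t acc, float scale,
                                int32_t zero_point, int8_t qmin, int8_t qmax) {
  float fpacc = (float) acc * scale;
  // Clamp against (qmin - zero_point, qmax - zero_point), mirroring the
  // kernel's output_min_less_zero_point / output_max_less_zero_point.
  const float fmin = (float) ((int32_t) qmin - zero_point);
  const float fmax = (float) ((int32_t) qmax - zero_point);
  fpacc = fpacc < fmin ? fmin : fpacc;
  fpacc = fpacc > fmax ? fmax : fpacc;
  // lrintf rounds to nearest-even under the default rounding mode.
  return (int8_t) ((int32_t) lrintf(fpacc) + zero_point);
}

int main(void) {
  printf("%d\n", requantize_lrintf(12345, 0.01f, -3, -128, 127));  // -> 120
  return 0;
}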
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-3x4-minmax-fp32-wasm-fmagic.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/scalar.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_3x4__wasm_fmagic( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } do { int32_t vacc0x0 = ((const int32_t*) w)[0]; int32_t vacc0x1 = ((const int32_t*) w)[1]; int32_t vacc0x2 = ((const int32_t*) w)[2]; int32_t vacc0x3 = ((const int32_t*) w)[3]; int32_t vacc1x0 = vacc0x0; int32_t vacc1x1 = vacc0x1; int32_t vacc1x2 = vacc0x2; int32_t vacc1x3 = vacc0x3; int32_t vacc2x0 = vacc0x0; int32_t vacc2x1 = vacc0x1; int32_t vacc2x2 = vacc0x2; int32_t vacc2x3 = vacc0x3; w = (const int32_t*) w + 4; size_t k = kc; do { const int32_t va0 = (int32_t) *a0++; const int32_t va1 = (int32_t) *a1++; const int32_t va2 = (int32_t) *a2++; const int32_t vb0 = (int32_t) ((const int8_t*) w)[0]; const int32_t vb1 = (int32_t) ((const int8_t*) w)[1]; const int32_t vb2 = (int32_t) ((const int8_t*) w)[2]; const int32_t vb3 = (int32_t) ((const int8_t*) w)[3]; w = (const int8_t*) w + 4; vacc0x0 += va0 * vb0; vacc0x1 += va0 * vb1; vacc0x2 += va0 * vb2; vacc0x3 += va0 * vb3; vacc1x0 += va1 * vb0; vacc1x1 += va1 * vb1; vacc1x2 += va1 * vb2; vacc1x3 += va1 * vb3; vacc2x0 += va2 * vb0; vacc2x1 += va2 * vb1; vacc2x2 += va2 * vb2; vacc2x3 += va2 * vb3; k -= sizeof(int8_t); } while (k != 0); float vfpacc0x0 = (float) vacc0x0; float vfpacc0x1 = (float) vacc0x1; float vfpacc0x2 = (float) vacc0x2; float vfpacc0x3 = (float) vacc0x3; float vfpacc1x0 = (float) vacc1x0; float vfpacc1x1 = (float) vacc1x1; float vfpacc1x2 = (float) vacc1x2; float vfpacc1x3 = (float) vacc1x3; float vfpacc2x0 = (float) vacc2x0; float vfpacc2x1 = (float) vacc2x1; float vfpacc2x2 = (float) vacc2x2; float vfpacc2x3 = (float) vacc2x3; const float vscale0 = ((const float*) w)[0]; vfpacc0x0 *= vscale0; vfpacc1x0 *= vscale0; vfpacc2x0 *= vscale0; const float vscale1 = ((const float*) w)[1]; vfpacc0x1 *= vscale1; vfpacc1x1 *= vscale1; vfpacc2x1 *= vscale1; const float vscale2 = ((const float*) w)[2]; vfpacc0x2 *= vscale2; vfpacc1x2 *= vscale2; vfpacc2x2 *= vscale2; const float vscale3 = ((const float*) w)[3]; vfpacc0x3 *= vscale3; vfpacc1x3 *= vscale3; vfpacc2x3 *= vscale3; w = (const void*) ((const float*) w + 4); const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point; vfpacc0x0 = __builtin_wasm_max_f32(vfpacc0x0, voutput_min_less_zero_point); vfpacc0x1 = __builtin_wasm_max_f32(vfpacc0x1, voutput_min_less_zero_point); vfpacc0x2 = __builtin_wasm_max_f32(vfpacc0x2, voutput_min_less_zero_point); vfpacc0x3 = __builtin_wasm_max_f32(vfpacc0x3, voutput_min_less_zero_point); vfpacc1x0 = __builtin_wasm_max_f32(vfpacc1x0, 
voutput_min_less_zero_point); vfpacc1x1 = __builtin_wasm_max_f32(vfpacc1x1, voutput_min_less_zero_point); vfpacc1x2 = __builtin_wasm_max_f32(vfpacc1x2, voutput_min_less_zero_point); vfpacc1x3 = __builtin_wasm_max_f32(vfpacc1x3, voutput_min_less_zero_point); vfpacc2x0 = __builtin_wasm_max_f32(vfpacc2x0, voutput_min_less_zero_point); vfpacc2x1 = __builtin_wasm_max_f32(vfpacc2x1, voutput_min_less_zero_point); vfpacc2x2 = __builtin_wasm_max_f32(vfpacc2x2, voutput_min_less_zero_point); vfpacc2x3 = __builtin_wasm_max_f32(vfpacc2x3, voutput_min_less_zero_point); const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point; vfpacc0x0 = __builtin_wasm_min_f32(vfpacc0x0, voutput_max_less_zero_point); vfpacc0x1 = __builtin_wasm_min_f32(vfpacc0x1, voutput_max_less_zero_point); vfpacc0x2 = __builtin_wasm_min_f32(vfpacc0x2, voutput_max_less_zero_point); vfpacc0x3 = __builtin_wasm_min_f32(vfpacc0x3, voutput_max_less_zero_point); vfpacc1x0 = __builtin_wasm_min_f32(vfpacc1x0, voutput_max_less_zero_point); vfpacc1x1 = __builtin_wasm_min_f32(vfpacc1x1, voutput_max_less_zero_point); vfpacc1x2 = __builtin_wasm_min_f32(vfpacc1x2, voutput_max_less_zero_point); vfpacc1x3 = __builtin_wasm_min_f32(vfpacc1x3, voutput_max_less_zero_point); vfpacc2x0 = __builtin_wasm_min_f32(vfpacc2x0, voutput_max_less_zero_point); vfpacc2x1 = __builtin_wasm_min_f32(vfpacc2x1, voutput_max_less_zero_point); vfpacc2x2 = __builtin_wasm_min_f32(vfpacc2x2, voutput_max_less_zero_point); vfpacc2x3 = __builtin_wasm_min_f32(vfpacc2x3, voutput_max_less_zero_point); const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias; vfpacc0x0 += vmagic_bias; vfpacc0x1 += vmagic_bias; vfpacc0x2 += vmagic_bias; vfpacc0x3 += vmagic_bias; vfpacc1x0 += vmagic_bias; vfpacc1x1 += vmagic_bias; vfpacc1x2 += vmagic_bias; vfpacc1x3 += vmagic_bias; vfpacc2x0 += vmagic_bias; vfpacc2x1 += vmagic_bias; vfpacc2x2 += vmagic_bias; vfpacc2x3 += vmagic_bias; const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point; int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0) - vmagic_bias_less_output_zero_point; int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1) - vmagic_bias_less_output_zero_point; int32_t vout0x2 = (int32_t) float_as_uint32(vfpacc0x2) - vmagic_bias_less_output_zero_point; int32_t vout0x3 = (int32_t) float_as_uint32(vfpacc0x3) - vmagic_bias_less_output_zero_point; int32_t vout1x0 = (int32_t) float_as_uint32(vfpacc1x0) - vmagic_bias_less_output_zero_point; int32_t vout1x1 = (int32_t) float_as_uint32(vfpacc1x1) - vmagic_bias_less_output_zero_point; int32_t vout1x2 = (int32_t) float_as_uint32(vfpacc1x2) - vmagic_bias_less_output_zero_point; int32_t vout1x3 = (int32_t) float_as_uint32(vfpacc1x3) - vmagic_bias_less_output_zero_point; int32_t vout2x0 = (int32_t) float_as_uint32(vfpacc2x0) - vmagic_bias_less_output_zero_point; int32_t vout2x1 = (int32_t) float_as_uint32(vfpacc2x1) - vmagic_bias_less_output_zero_point; int32_t vout2x2 = (int32_t) float_as_uint32(vfpacc2x2) - vmagic_bias_less_output_zero_point; int32_t vout2x3 = (int32_t) float_as_uint32(vfpacc2x3) - vmagic_bias_less_output_zero_point; if XNN_LIKELY(nc >= 4) { c0[0] = (int8_t) vout0x0; c0[1] = (int8_t) vout0x1; c0[2] = (int8_t) vout0x2; c0[3] = (int8_t) vout0x3; c1[0] = (int8_t) vout1x0; c1[1] = (int8_t) vout1x1; c1[2] = (int8_t) vout1x2; c1[3] = (int8_t) vout1x3; c2[0] = (int8_t) vout2x0; c2[1] = (int8_t) vout2x1; c2[2] = (int8_t) vout2x2; c2[3] = (int8_t) vout2x3; a0 = (const int8_t*) ((uintptr_t) a0 - 
kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); nc -= 4; } else { if (nc & 2) { c0[0] = (int8_t) vout0x0; c0[1] = (int8_t) vout0x1; vout0x0 = vout0x2; c0 += 2; c1[0] = (int8_t) vout1x0; c1[1] = (int8_t) vout1x1; vout1x0 = vout1x2; c1 += 2; c2[0] = (int8_t) vout2x0; c2[1] = (int8_t) vout2x1; vout2x0 = vout2x2; c2 += 2; } if (nc & 1) { c0[0] = (int8_t) vout0x0; c1[0] = (int8_t) vout1x0; c2[0] = (int8_t) vout2x0; } nc = 0; } } while (nc != 0); }
8,618
37.137168
116
c
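The fmagic variant above replaces lrintf with the magic-bias trick: after clamping, adding 0x1.8p+23f leaves the rounded integer in the float's low mantissa bits, so one bit-reinterpretation plus one integer subtraction yields the quantized value with the output zero point folded in. A hedged standalone sketch follows; the clamping the kernel performs beforehand is omitted, and the constants are illustrative.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int8_t requantize_fmagic(float fpacc, int32_t zero_point) {
  const float magic_bias = 12582912.0f;  // 0x1.8p+23f
  // float_as_uint32(magic_bias) == 0x4B400000; fold the zero point in.
  const int32_t magic_bias_less_zero_point = (int32_t) 0x4B400000 - zero_point;
  fpacc += magic_bias;  // forces round-to-nearest into the mantissa bits
  uint32_t bits;
  memcpy(&bits, &fpacc, sizeof bits);  // portable float_as_uint32
  return (int8_t) ((int32_t) bits - magic_bias_less_zero_point);
}

int main(void) {
  printf("%d\n", requantize_fmagic(123.45f, -3));  // -> 120
  return 0;
}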
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-3x4c2-minmax-fp32-avx-ld128.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/MRx4c2-sse.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <smmintrin.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> #include <xnnpack/unaligned.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_3x4c2__avx_ld128( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 2 * sizeof(int8_t)); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } do { __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w); __m128i vacc1x0123 = vacc0x0123; __m128i vacc2x0123 = vacc0x0123; w = (const void*) ((const int32_t*) w + 4); size_t k = kc; while (k >= 8 * sizeof(int8_t)) { const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0); const __m128i vxa0 = _mm_cvtepi8_epi16(va0); a0 += 8; const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1); const __m128i vxa1 = _mm_cvtepi8_epi16(va1); a1 += 8; const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2); const __m128i vxa2 = _mm_cvtepi8_epi16(va2); a2 += 8; const __m128i vb01 = _mm_loadu_si128((const __m128i*) w); const __m128i vxb0 = _mm_cvtepi8_epi16(vb01); const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16)); const __m128i vxb2 = _mm_cvtepi8_epi16(vb23); const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 
3)), vxb3)); w = (const void*) ((const int8_t*) w + 32); k -= 8 * sizeof(int8_t); } if (k != 0) { const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0); const __m128i vxa0 = _mm_cvtepi8_epi16(va0); a0 = (const int8_t*) ((uintptr_t) a0 + k); const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1); const __m128i vxa1 = _mm_cvtepi8_epi16(va1); a1 = (const int8_t*) ((uintptr_t) a1 + k); const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2); const __m128i vxa2 = _mm_cvtepi8_epi16(va2); a2 = (const int8_t*) ((uintptr_t) a2 + k); const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb0 = _mm_cvtepi8_epi16(vb0); w = (const void*) ((const int8_t*) w + 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); if (k > 2 * sizeof(int8_t)) { const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb1 = _mm_cvtepi8_epi16(vb1); w = (const void*) ((const int8_t*) w + 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); if (k > 4 * sizeof(int8_t)) { const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb2 = _mm_cvtepi8_epi16(vb2); w = (const void*) ((const int8_t*) w + 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); } } } __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123); __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123); __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123); const __m128 vscale0123 = _mm_loadu_ps((const float*) w); w = (const void*) ((const float*) w + 4); vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123); vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123); vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale0123); const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point); vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point); vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point); vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point); vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123); vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123); vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123); const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point); __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point); __m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point); __m128i vout = _mm_packs_epi16(vacc01x0123, vacc22x0123); vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min)); if (nc >= 4) { unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout)); unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1)); unaligned_store_u32(c2, (uint32_t) 
_mm_extract_epi32(vout, 2)); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); nc -= 4; } else { if (nc & 2) { unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0)); c0 += 2; unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2)); c1 += 2; unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4)); c2 += 2; vout = _mm_srli_epi32(vout, 16); } if (nc & 1) { *c0 = (int8_t) _mm_extract_epi8(vout, 0); *c1 = (int8_t) _mm_extract_epi8(vout, 4); *c2 = (int8_t) _mm_extract_epi8(vout, 8); } nc = 0; } } while (nc != 0); }
8,872
38.611607
108
c
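The 4c2 SSE-family kernels above consume K in pairs: activations and weights are sign-extended to int16, and a single _mm_madd_epi16 multiplies one broadcast activation pair against four weight pairs, producing four int32 partial sums. A standalone sketch of that one step, with arbitrary data values; build with -msse4.1.

#include <smmintrin.h>  // SSE4.1
#include <stdint.h>
#include <stdio.h>

int main(void) {
  const int8_t a[8] = {1, -2, 3, 4, 5, 6, 7, 8};
  const int8_t b[8] = {10, 20, 30, 40, 50, 60, 70, 80};  // 4 columns x 2 ks
  const __m128i va = _mm_loadl_epi64((const __m128i*) a);
  const __m128i vb = _mm_loadl_epi64((const __m128i*) b);
  const __m128i vxa = _mm_cvtepi8_epi16(va);  // SSE4.1 sign extension
  const __m128i vxb = _mm_cvtepi8_epi16(vb);
  // Broadcast the first k-pair (a[0], a[1]) to all four 32-bit lanes.
  const __m128i va00 = _mm_shuffle_epi32(vxa, _MM_SHUFFLE(0, 0, 0, 0));
  const __m128i vacc = _mm_madd_epi16(va00, vxb);  // a0*b0 + a1*b1 per lane
  int32_t out[4];
  _mm_storeu_si128((__m128i*) out, vacc);
  printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]);  // -30 -50 -70 -90
  return 0;
}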
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-3x4c2-minmax-fp32-avx-ld64.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/MRx4c2-sse.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <smmintrin.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> #include <xnnpack/unaligned.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_3x4c2__avx_ld64( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 2 * sizeof(int8_t)); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } do { __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w); __m128i vacc1x0123 = vacc0x0123; __m128i vacc2x0123 = vacc0x0123; w = (const void*) ((const int32_t*) w + 4); size_t k = kc; while (k >= 8 * sizeof(int8_t)) { const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0); const __m128i vxa0 = _mm_cvtepi8_epi16(va0); a0 += 8; const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1); const __m128i vxa1 = _mm_cvtepi8_epi16(va1); a1 += 8; const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2); const __m128i vxa2 = _mm_cvtepi8_epi16(va2); a2 += 8; const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb0 = _mm_cvtepi8_epi16(vb0); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8)); const __m128i vxb1 = _mm_cvtepi8_epi16(vb1); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16)); const __m128i vxb2 = _mm_cvtepi8_epi16(vb2); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24)); const __m128i vxb3 = _mm_cvtepi8_epi16(vb3); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3)); 
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3)); w = (const void*) ((const int8_t*) w + 32); k -= 8 * sizeof(int8_t); } if (k != 0) { const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0); const __m128i vxa0 = _mm_cvtepi8_epi16(va0); a0 = (const int8_t*) ((uintptr_t) a0 + k); const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1); const __m128i vxa1 = _mm_cvtepi8_epi16(va1); a1 = (const int8_t*) ((uintptr_t) a1 + k); const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2); const __m128i vxa2 = _mm_cvtepi8_epi16(va2); a2 = (const int8_t*) ((uintptr_t) a2 + k); const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb0 = _mm_cvtepi8_epi16(vb0); w = (const void*) ((const int8_t*) w + 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); if (k > 2 * sizeof(int8_t)) { const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb1 = _mm_cvtepi8_epi16(vb1); w = (const void*) ((const int8_t*) w + 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); if (k > 4 * sizeof(int8_t)) { const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb2 = _mm_cvtepi8_epi16(vb2); w = (const void*) ((const int8_t*) w + 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); } } } __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123); __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123); __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123); const __m128 vscale0123 = _mm_loadu_ps((const float*) w); w = (const void*) ((const float*) w + 4); vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123); vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123); vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale0123); const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point); vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point); vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point); vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point); vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123); vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123); vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123); const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point); __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point); __m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point); __m128i vout = _mm_packs_epi16(vacc01x0123, vacc22x0123); vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min)); if (nc >= 4) { unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout)); 
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1)); unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2)); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); nc -= 4; } else { if (nc & 2) { unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0)); c0 += 2; unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2)); c1 += 2; unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4)); c2 += 2; vout = _mm_srli_epi32(vout, 16); } if (nc & 1) { *c0 = (int8_t) _mm_extract_epi8(vout, 0); *c1 = (int8_t) _mm_extract_epi8(vout, 4); *c2 = (int8_t) _mm_extract_epi8(vout, 8); } nc = 0; } } while (nc != 0); }
8,986
38.765487
108
c
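Both avx variants above first round kc up to a multiple of 2 with round_up_po2, so the k-remainder path only ever sees 2, 4, or 6 leftover bytes. A sketch of that helper, assuming the usual power-of-two rounding idiom; the actual definition lives in xnnpack/math.h and may differ.

#include <stddef.h>
#include <stdio.h>

// Assumed shape of round_up_po2: q must be a power of two; this is the
// common (n + q - 1) & ~(q - 1) idiom, not a verified copy of the header.
static size_t round_up_po2(size_t n, size_t q) {
  return (n + q - 1) & ~(q - 1);
}

int main(void) {
  printf("%zu %zu %zu\n", round_up_po2(5, 2), round_up_po2(8, 2),
         round_up_po2(13, 8));  // 6 8 16
  return 0;
}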
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-3x4c2-minmax-fp32-sse2-ld128.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/MRx4c2-sse.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <emmintrin.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> #include <xnnpack/unaligned.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_3x4c2__sse2_ld128( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 2 * sizeof(int8_t)); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } do { __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w); __m128i vacc1x0123 = vacc0x0123; __m128i vacc2x0123 = vacc0x0123; w = (const void*) ((const int32_t*) w + 4); size_t k = kc; while (k >= 8 * sizeof(int8_t)) { const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0); const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8); a0 += 8; const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1); const __m128i vxa1 = _mm_srai_epi16(_mm_unpacklo_epi8(va1, va1), 8); a1 += 8; const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2); const __m128i vxa2 = _mm_srai_epi16(_mm_unpacklo_epi8(va2, va2), 8); a2 += 8; const __m128i vb01 = _mm_loadu_si128((const __m128i*) w); const __m128i vsb01 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb01); const __m128i vxb0 = _mm_unpacklo_epi8(vb01, vsb01); const __m128i vxb1 = _mm_unpackhi_epi8(vb01, vsb01); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16)); const __m128i vsb23 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb23); const __m128i vxb2 = _mm_unpacklo_epi8(vb23, vsb23); const __m128i vxb3 = _mm_unpackhi_epi8(vb23, vsb23); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3)); vacc1x0123 = 
_mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3)); w = (const void*) ((const int8_t*) w + 32); k -= 8 * sizeof(int8_t); } if (k != 0) { const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0); const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8); a0 = (const int8_t*) ((uintptr_t) a0 + k); const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1); const __m128i vxa1 = _mm_srai_epi16(_mm_unpacklo_epi8(va1, va1), 8); a1 = (const int8_t*) ((uintptr_t) a1 + k); const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2); const __m128i vxa2 = _mm_srai_epi16(_mm_unpacklo_epi8(va2, va2), 8); a2 = (const int8_t*) ((uintptr_t) a2 + k); const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb0 = _mm_srai_epi16(_mm_unpacklo_epi8(vb0, vb0), 8); w = (const void*) ((const int8_t*) w + 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); if (k > 2 * sizeof(int8_t)) { const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb1 = _mm_srai_epi16(_mm_unpacklo_epi8(vb1, vb1), 8); w = (const void*) ((const int8_t*) w + 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); if (k > 4 * sizeof(int8_t)) { const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb2 = _mm_srai_epi16(_mm_unpacklo_epi8(vb2, vb2), 8); w = (const void*) ((const int8_t*) w + 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); } } } __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123); __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123); __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123); const __m128 vscale0123 = _mm_loadu_ps((const float*) w); w = (const void*) ((const float*) w + 4); vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123); vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123); vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale0123); const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point); vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point); vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point); vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point); vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123); vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123); vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123); const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point); __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point); __m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), 
voutput_zero_point); const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min); vacc01x0123 = _mm_max_epi16(vacc01x0123, voutput_min); vacc22x0123 = _mm_max_epi16(vacc22x0123, voutput_min); __m128i vout = _mm_packs_epi16(vacc01x0123, vacc22x0123); if (nc >= 4) { unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout)); vout = _mm_shuffle_epi32(vout, _MM_SHUFFLE(0, 3, 2, 1)); unaligned_store_u32(c1, (uint32_t) _mm_cvtsi128_si32(vout)); vout = _mm_shuffle_epi32(vout, _MM_SHUFFLE(0, 3, 2, 1)); unaligned_store_u32(c2, (uint32_t) _mm_cvtsi128_si32(vout)); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); nc -= 4; } else { if (nc & 2) { unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0)); c0 += 2; unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2)); c1 += 2; unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4)); c2 += 2; vout = _mm_srli_epi32(vout, 16); } if (nc & 1) { *c0 = (int8_t) _mm_cvtsi128_si32(vout); *c1 = (int8_t) _mm_extract_epi16(vout, 2); *c2 = (int8_t) _mm_extract_epi16(vout, 4); } nc = 0; } } while (nc != 0); }
9,448
40.082609
108
c
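Because pmovsxbw is SSE4.1-only, the sse2 kernels above sign-extend int8 lanes with older idioms: a self-unpack followed by an arithmetic right shift for activations, and an explicit pcmpgtb sign mask interleaved in for the 16-byte weight loads. Both are shown in this standalone sketch.

#include <emmintrin.h>  // SSE2
#include <stdint.h>
#include <stdio.h>

int main(void) {
  const int8_t a[16] = {-1, 2, -3, 4, 5, -6, 7, -8};
  const __m128i va = _mm_loadu_si128((const __m128i*) a);
  // (a) duplicate each byte into both halves of an int16, then shift >> 8.
  const __m128i vxa = _mm_srai_epi16(_mm_unpacklo_epi8(va, va), 8);
  // (b) compute the sign bytes with pcmpgtb and interleave them in.
  const __m128i vsign = _mm_cmpgt_epi8(_mm_setzero_si128(), va);
  const __m128i vxb = _mm_unpacklo_epi8(va, vsign);
  int16_t out_a[8], out_b[8];
  _mm_storeu_si128((__m128i*) out_a, vxa);
  _mm_storeu_si128((__m128i*) out_b, vxb);
  printf("%d %d | %d %d\n", out_a[0], out_a[1], out_b[0], out_b[1]);  // -1 2 | -1 2
  return 0;
}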
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-3x4c2-minmax-fp32-sse2-ld64.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/MRx4c2-sse.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <emmintrin.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> #include <xnnpack/unaligned.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_3x4c2__sse2_ld64( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 2 * sizeof(int8_t)); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } do { __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w); __m128i vacc1x0123 = vacc0x0123; __m128i vacc2x0123 = vacc0x0123; w = (const void*) ((const int32_t*) w + 4); size_t k = kc; while (k >= 8 * sizeof(int8_t)) { const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0); const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8); a0 += 8; const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1); const __m128i vxa1 = _mm_srai_epi16(_mm_unpacklo_epi8(va1, va1), 8); a1 += 8; const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2); const __m128i vxa2 = _mm_srai_epi16(_mm_unpacklo_epi8(va2, va2), 8); a2 += 8; const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb0 = _mm_srai_epi16(_mm_unpacklo_epi8(vb0, vb0), 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8)); const __m128i vxb1 = _mm_srai_epi16(_mm_unpacklo_epi8(vb1, vb1), 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16)); const __m128i vxb2 = _mm_srai_epi16(_mm_unpacklo_epi8(vb2, vb2), 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24)); const __m128i vxb3 = _mm_srai_epi16(_mm_unpacklo_epi8(vb3, vb3), 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, 
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3)); w = (const void*) ((const int8_t*) w + 32); k -= 8 * sizeof(int8_t); } if (k != 0) { const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0); const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8); a0 = (const int8_t*) ((uintptr_t) a0 + k); const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1); const __m128i vxa1 = _mm_srai_epi16(_mm_unpacklo_epi8(va1, va1), 8); a1 = (const int8_t*) ((uintptr_t) a1 + k); const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2); const __m128i vxa2 = _mm_srai_epi16(_mm_unpacklo_epi8(va2, va2), 8); a2 = (const int8_t*) ((uintptr_t) a2 + k); const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb0 = _mm_srai_epi16(_mm_unpacklo_epi8(vb0, vb0), 8); w = (const void*) ((const int8_t*) w + 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); if (k > 2 * sizeof(int8_t)) { const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb1 = _mm_srai_epi16(_mm_unpacklo_epi8(vb1, vb1), 8); w = (const void*) ((const int8_t*) w + 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); if (k > 4 * sizeof(int8_t)) { const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb2 = _mm_srai_epi16(_mm_unpacklo_epi8(vb2, vb2), 8); w = (const void*) ((const int8_t*) w + 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); } } } __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123); __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123); __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123); const __m128 vscale0123 = _mm_loadu_ps((const float*) w); w = (const void*) ((const float*) w + 4); vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123); vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123); vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale0123); const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point); vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point); vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point); vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point); vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123); vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123); vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123); const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point); __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point); 
__m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point); const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min); vacc01x0123 = _mm_max_epi16(vacc01x0123, voutput_min); vacc22x0123 = _mm_max_epi16(vacc22x0123, voutput_min); __m128i vout = _mm_packs_epi16(vacc01x0123, vacc22x0123); if (nc >= 4) { unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout)); vout = _mm_shuffle_epi32(vout, _MM_SHUFFLE(0, 3, 2, 1)); unaligned_store_u32(c1, (uint32_t) _mm_cvtsi128_si32(vout)); vout = _mm_shuffle_epi32(vout, _MM_SHUFFLE(0, 3, 2, 1)); unaligned_store_u32(c2, (uint32_t) _mm_cvtsi128_si32(vout)); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); nc -= 4; } else { if (nc & 2) { unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0)); c0 += 2; unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2)); c1 += 2; unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4)); c2 += 2; vout = _mm_srli_epi32(vout, 16); } if (nc & 1) { *c0 = (int8_t) _mm_cvtsi128_si32(vout); *c1 = (int8_t) _mm_extract_epi16(vout, 2); *c2 = (int8_t) _mm_extract_epi16(vout, 4); } nc = 0; } } while (nc != 0); }
9,538
40.473913
108
c
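The SSE2 output stage above also differs from the SSE4.1 one: the lower clamp has to be applied with pmaxsw while the values are still int16, since pmaxsb does not exist before SSE4.1; packsswb then saturates down to int8. A sketch with illustrative constants.

#include <emmintrin.h>  // SSE2
#include <stdint.h>
#include <stdio.h>

int main(void) {
  const __m128i vzero_point = _mm_set1_epi16(-3);
  const __m128i vmin = _mm_set1_epi16(-100);  // output_min-style bound
  __m128i vacc = _mm_setr_epi16(3, -2, 1, 0, -42, 7, -500, 300);
  vacc = _mm_adds_epi16(vacc, vzero_point);  // saturating zero-point add
  vacc = _mm_max_epi16(vacc, vmin);          // clamp while still int16
  const __m128i vout = _mm_packs_epi16(vacc, vacc);  // saturate to int8
  int8_t out[16];
  _mm_storeu_si128((__m128i*) out, vout);
  printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]);  // 0 -5 -2 -3
  return 0;
}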
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-3x4c2-minmax-fp32-sse41-ld128.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/MRx4c2-sse.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <smmintrin.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> #include <xnnpack/unaligned.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_3x4c2__sse41_ld128( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 2 * sizeof(int8_t)); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } do { __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w); __m128i vacc1x0123 = vacc0x0123; __m128i vacc2x0123 = vacc0x0123; w = (const void*) ((const int32_t*) w + 4); size_t k = kc; while (k >= 8 * sizeof(int8_t)) { const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0); const __m128i vxa0 = _mm_cvtepi8_epi16(va0); a0 += 8; const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1); const __m128i vxa1 = _mm_cvtepi8_epi16(va1); a1 += 8; const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2); const __m128i vxa2 = _mm_cvtepi8_epi16(va2); a2 += 8; const __m128i vb01 = _mm_loadu_si128((const __m128i*) w); const __m128i vxb0 = _mm_cvtepi8_epi16(vb01); const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16)); const __m128i vxb2 = _mm_cvtepi8_epi16(vb23); const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 
3)), vxb3)); w = (const void*) ((const int8_t*) w + 32); k -= 8 * sizeof(int8_t); } if (k != 0) { const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0); const __m128i vxa0 = _mm_cvtepi8_epi16(va0); a0 = (const int8_t*) ((uintptr_t) a0 + k); const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1); const __m128i vxa1 = _mm_cvtepi8_epi16(va1); a1 = (const int8_t*) ((uintptr_t) a1 + k); const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2); const __m128i vxa2 = _mm_cvtepi8_epi16(va2); a2 = (const int8_t*) ((uintptr_t) a2 + k); const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb0 = _mm_cvtepi8_epi16(vb0); w = (const void*) ((const int8_t*) w + 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); if (k > 2 * sizeof(int8_t)) { const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb1 = _mm_cvtepi8_epi16(vb1); w = (const void*) ((const int8_t*) w + 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); if (k > 4 * sizeof(int8_t)) { const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb2 = _mm_cvtepi8_epi16(vb2); w = (const void*) ((const int8_t*) w + 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); } } } __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123); __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123); __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123); const __m128 vscale0123 = _mm_loadu_ps((const float*) w); w = (const void*) ((const float*) w + 4); vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123); vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123); vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale0123); const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point); vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point); vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point); vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point); vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123); vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123); vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123); const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point); __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point); __m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point); __m128i vout = _mm_packs_epi16(vacc01x0123, vacc22x0123); vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min)); if (nc >= 4) { unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout)); unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1)); unaligned_store_u32(c2, (uint32_t) 
_mm_extract_epi32(vout, 2)); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); nc -= 4; } else { if (nc & 2) { unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0)); c0 += 2; unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2)); c1 += 2; unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4)); c2 += 2; vout = _mm_srli_epi32(vout, 16); } if (nc & 1) { *c0 = (int8_t) _mm_extract_epi8(vout, 0); *c1 = (int8_t) _mm_extract_epi8(vout, 4); *c2 = (int8_t) _mm_extract_epi8(vout, 8); } nc = 0; } } while (nc != 0); }
8,874
38.620536
108
c
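The nc-remainder logic shared by these kernels stores a 2-column tail as one 16-bit piece, shifts the vector right by 16 bits so the next byte reaches lane 0, then stores a final byte when nc is odd. A standalone sketch, with memcpy standing in for XNNPACK's unaligned_store helpers; build with -msse4.1.

#include <smmintrin.h>  // SSE4.1 (for _mm_extract_epi8)
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void) {
  __m128i vout = _mm_setr_epi8(1, 2, 3, 4, 5, 6, 7, 8,
                               9, 10, 11, 12, 13, 14, 15, 16);
  int8_t row[4] = {0};
  int8_t* c0 = row;
  size_t nc = 3;  // odd tail: one 16-bit store, a shift, one byte store
  if (nc & 2) {
    const uint16_t lo = (uint16_t) _mm_extract_epi16(vout, 0);
    memcpy(c0, &lo, sizeof lo);  // stands in for unaligned_store_u16
    c0 += 2;
    vout = _mm_srli_epi32(vout, 16);  // bring byte 2 of each lane to byte 0
  }
  if (nc & 1) {
    *c0 = (int8_t) _mm_extract_epi8(vout, 0);
  }
  printf("%d %d %d\n", row[0], row[1], row[2]);  // 1 2 3
  return 0;
}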
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-3x4c2-minmax-fp32-sse41-ld64.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-gemm/MRx4c2-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <smmintrin.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>


void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_3x4c2__sse41_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  kc = round_up_po2(kc, 2 * sizeof(int8_t));
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }

  do {
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    __m128i vacc1x0123 = vacc0x0123;
    __m128i vacc2x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);

    size_t k = kc;
    while (k >= 8 * sizeof(int8_t)) {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
      a0 += 8;
      const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
      const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
      a1 += 8;
      const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
      const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
      a2 += 8;

      const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
      const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);

      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
      const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);

      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
      const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
      const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);

      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
      const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
      const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);

      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));

      w = (const void*) ((const int8_t*) w + 32);
      k -= 8 * sizeof(int8_t);
    }
    if (k != 0) {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
      a0 = (const int8_t*) ((uintptr_t) a0 + k);
      const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
      const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
      a1 = (const int8_t*) ((uintptr_t) a1 + k);
      const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
      const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
      a2 = (const int8_t*) ((uintptr_t) a2 + k);

      const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
      const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
      w = (const void*) ((const int8_t*) w + 8);

      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));

      if (k > 2 * sizeof(int8_t)) {
        const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
        const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
        w = (const void*) ((const int8_t*) w + 8);

        vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
        vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
        vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));

        if (k > 4 * sizeof(int8_t)) {
          const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
          const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
          w = (const void*) ((const int8_t*) w + 8);

          vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
          vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
          vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
        }
      }
    }

    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);

    const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
    w = (const void*) ((const float*) w + 4);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale0123);

    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);

    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);

    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);

    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc22x0123);

    vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));

    if (nc >= 4) {
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
      unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);

      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);

      nc -= 4;
    } else {
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c0 = (int8_t) _mm_extract_epi8(vout, 0);
        *c1 = (int8_t) _mm_extract_epi8(vout, 4);
        *c2 = (int8_t) _mm_extract_epi8(vout, 8);
      }

      nc = 0;
    }
  } while (nc != 0);
}
8,988
38.774336
108
c
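The sse41_ld64 kernel above requantizes with the fp32 scheme: int32 accumulators are converted to float, scaled per output channel, clamped against the upper bound before rounding, rounded by _mm_cvtps_epi32, and the zero point and lower bound are applied during the saturating packs. A minimal scalar sketch of that scheme, assuming the default round-to-nearest mode; the function name is illustrative, not XNNPACK API:

#include <math.h>
#include <stdint.h>

// Scalar model of one output element's fp32 requantization (illustrative).
static inline int8_t requantize_fp32(int32_t acc, float scale,
                                     int16_t zero_point,
                                     int8_t out_min, int8_t out_max) {
  float scaled = (float) acc * scale;
  const float max_less_zp = (float) (out_max - zero_point);
  if (scaled > max_less_zp) scaled = max_less_zp;       // pre-round upper clamp
  int32_t out = (int32_t) lrintf(scaled) + zero_point;  // round-to-nearest-even
  return (int8_t) (out < out_min ? out_min : out);      // lower clamp in the pack
}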
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-3x4c2-minmax-fp32-wasmsimd-dot16x2-ld128.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-gemm/MRx4c2-wasmsimd-dot16x2.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>


void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_3x4c2__wasmsimd_dot16x2_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  kc = round_up_po2(kc, 2 * sizeof(int8_t));
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }

  do {
    v128_t vacc0x0123 = wasm_v128_load(w);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc2x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);

    size_t k = kc;
    while (k >= 8 * sizeof(int8_t)) {
      const v128_t vxa0 = wasm_i16x8_load8x8((const v128_t*) a0);
      a0 += 8;
      const v128_t vxa1 = wasm_i16x8_load8x8((const v128_t*) a1);
      a1 += 8;
      const v128_t vxa2 = wasm_i16x8_load8x8((const v128_t*) a2);
      a2 += 8;

      const v128_t vb01 = wasm_v128_load(w);
      const v128_t vxb0 = wasm_i16x8_extend_low_i8x16(vb01);
      const v128_t vxb1 = wasm_i16x8_extend_high_i8x16(vb01);

      vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0));
      vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 0, 0, 0, 0), vxb0));
      vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 0, 0, 0, 0), vxb0));

      vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1));
      vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 1, 1, 1, 1), vxb1));
      vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 1, 1, 1, 1), vxb1));
      const v128_t vb23 = wasm_v128_load((const int8_t*) w + 16);
      const v128_t vxb2 = wasm_i16x8_extend_low_i8x16(vb23);
      const v128_t vxb3 = wasm_i16x8_extend_high_i8x16(vb23);

      vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2));
      vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 2, 2, 2, 2), vxb2));
      vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 2, 2, 2, 2), vxb2));

      vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 3, 3, 3, 3), vxb3));
      vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 3, 3, 3, 3), vxb3));
      vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 3, 3, 3, 3), vxb3));

      w = (const void*) ((const int8_t*) w + 32);
      k -= 8 * sizeof(int8_t);
    }
    if (k != 0) {
      const v128_t vxa0 = wasm_i16x8_load8x8(a0);
      a0 = (const int8_t*) ((uintptr_t) a0 + k);
      const v128_t vxa1 = wasm_i16x8_load8x8(a1);
      a1 = (const int8_t*) ((uintptr_t) a1 + k);
      const v128_t vxa2 = wasm_i16x8_load8x8(a2);
      a2 = (const int8_t*) ((uintptr_t) a2 + k);

      const v128_t vxb0 = wasm_i16x8_load8x8(w);
      w = (const void*) ((const int8_t*) w + 8);

      vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0));
      vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 0, 0, 0, 0), vxb0));
      vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 0, 0, 0, 0), vxb0));

      if (k > 2 * sizeof(int8_t)) {
        const v128_t vxb1 = wasm_i16x8_load8x8(w);
        w = (const void*) ((const int8_t*) w + 8);

        vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1));
        vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 1, 1, 1, 1), vxb1));
        vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 1, 1, 1, 1), vxb1));

        if (k > 4 * sizeof(int8_t)) {
          const v128_t vxb2 = wasm_i16x8_load8x8(w);
          w = (const void*) ((const int8_t*) w + 8);

          vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2));
          vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 2, 2, 2, 2), vxb2));
          vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 2, 2, 2, 2), vxb2));
        }
      }
    }

    vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123);
    vacc1x0123 = wasm_f32x4_convert_i32x4(vacc1x0123);
    vacc2x0123 = wasm_f32x4_convert_i32x4(vacc2x0123);

    const v128_t vscale0123 = wasm_v128_load(w);
    w = (const void*) ((const float*) w + 4);
    vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale0123);
    vacc1x0123 = wasm_f32x4_mul(vacc1x0123, vscale0123);
    vacc2x0123 = wasm_f32x4_mul(vacc2x0123, vscale0123);

    const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
    vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias);
    vacc1x0123 = wasm_f32x4_add(vacc1x0123, vmagic_bias);
    vacc2x0123 = wasm_f32x4_add(vacc2x0123, vmagic_bias);

    const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
    vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min);
    vacc1x0123 = wasm_i32x4_max(vacc1x0123, vmagic_min);
    vacc2x0123 = wasm_i32x4_max(vacc2x0123, vmagic_min);

    const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
    vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point);
    vacc1x0123 = wasm_i32x4_sub(vacc1x0123, vmagic_bias_less_output_zero_point);
    vacc2x0123 = wasm_i32x4_sub(vacc2x0123, vmagic_bias_less_output_zero_point);

    v128_t vacc01x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc1x0123);
    v128_t vacc22x0123 = wasm_i16x8_narrow_i32x4(vacc2x0123, vacc2x0123);

    v128_t vout = wasm_i8x16_narrow_i16x8(vacc01x0123, vacc22x0123);

    const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
    vout = wasm_i8x16_min(vout, voutput_max);

    if (nc >= 4) {
      wasm_v128_store32_lane(c0, vout, 0);
      wasm_v128_store32_lane(c1, vout, 1);
      wasm_v128_store32_lane(c2, vout, 2);

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);

      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);

      nc -= 4;
    } else {
      if (nc & 2) {
        wasm_v128_store16_lane(c0, vout, 0);
        c0 += 2;
        wasm_v128_store16_lane(c1, vout, 2);
        c1 += 2;
        wasm_v128_store16_lane(c2, vout, 4);
        c2 += 2;
        vout = wasm_u32x4_shr(vout, 16);
      }
      if (nc & 1) {
        wasm_v128_store8_lane(c0, vout, 0);
        wasm_v128_store8_lane(c1, vout, 4);
        wasm_v128_store8_lane(c2, vout, 8);
      }

      nc = 0;
    }
  } while (nc != 0);
}
8,376
37.077273
134
c
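wasm_i32x4_dot_i16x8 is the workhorse above: each 32-bit lane receives the dot product of one adjacent int16 pair, which is what makes the "c2" layout (two input channels packed per weight pair) one instruction per four output channels. A scalar sketch of one such step, with hypothetical names:

#include <stdint.h>

// One dot16x2 update: lane n of acc gains a 2-element dot product of the
// broadcast activation pair against output channel n (illustrative).
static void dot16x2_step(int32_t acc[4], const int16_t a_pair[2],
                         const int16_t b[4][2]) {
  for (int n = 0; n < 4; n++) {
    acc[n] += (int32_t) a_pair[0] * b[n][0] + (int32_t) a_pair[1] * b[n][1];
  }
}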
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-3x4c2-minmax-fp32-wasmsimd-dot16x2-ld64.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-gemm/MRx4c2-wasmsimd-dot16x2.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>


void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_3x4c2__wasmsimd_dot16x2_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  kc = round_up_po2(kc, 2 * sizeof(int8_t));
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }

  do {
    v128_t vacc0x0123 = wasm_v128_load(w);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc2x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);

    size_t k = kc;
    while (k >= 8 * sizeof(int8_t)) {
      const v128_t vxa0 = wasm_i16x8_load8x8((const v128_t*) a0);
      a0 += 8;
      const v128_t vxa1 = wasm_i16x8_load8x8((const v128_t*) a1);
      a1 += 8;
      const v128_t vxa2 = wasm_i16x8_load8x8((const v128_t*) a2);
      a2 += 8;

      const v128_t vxb0 = wasm_i16x8_load8x8(w);

      vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0));
      vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 0, 0, 0, 0), vxb0));
      vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 0, 0, 0, 0), vxb0));
      const v128_t vxb1 = wasm_i16x8_load8x8((const int8_t*) w + 8);

      vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1));
      vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 1, 1, 1, 1), vxb1));
      vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 1, 1, 1, 1), vxb1));
      const v128_t vxb2 = wasm_i16x8_load8x8((const int8_t*) w + 16);

      vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2));
      vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 2, 2, 2, 2), vxb2));
      vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 2, 2, 2, 2), vxb2));
      const v128_t vxb3 = wasm_i16x8_load8x8((const int8_t*) w + 24);

      vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 3, 3, 3, 3), vxb3));
      vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 3, 3, 3, 3), vxb3));
      vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 3, 3, 3, 3), vxb3));

      w = (const void*) ((const int8_t*) w + 32);
      k -= 8 * sizeof(int8_t);
    }
    if (k != 0) {
      const v128_t vxa0 = wasm_i16x8_load8x8(a0);
      a0 = (const int8_t*) ((uintptr_t) a0 + k);
      const v128_t vxa1 = wasm_i16x8_load8x8(a1);
      a1 = (const int8_t*) ((uintptr_t) a1 + k);
      const v128_t vxa2 = wasm_i16x8_load8x8(a2);
      a2 = (const int8_t*) ((uintptr_t) a2 + k);

      const v128_t vxb0 = wasm_i16x8_load8x8(w);
      w = (const void*) ((const int8_t*) w + 8);

      vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0));
      vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 0, 0, 0, 0), vxb0));
      vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 0, 0, 0, 0), vxb0));

      if (k > 2 * sizeof(int8_t)) {
        const v128_t vxb1 = wasm_i16x8_load8x8(w);
        w = (const void*) ((const int8_t*) w + 8);

        vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1));
        vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 1, 1, 1, 1), vxb1));
        vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 1, 1, 1, 1), vxb1));

        if (k > 4 * sizeof(int8_t)) {
          const v128_t vxb2 = wasm_i16x8_load8x8(w);
          w = (const void*) ((const int8_t*) w + 8);

          vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2));
          vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 2, 2, 2, 2), vxb2));
          vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 2, 2, 2, 2), vxb2));
        }
      }
    }

    vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123);
    vacc1x0123 = wasm_f32x4_convert_i32x4(vacc1x0123);
    vacc2x0123 = wasm_f32x4_convert_i32x4(vacc2x0123);

    const v128_t vscale0123 = wasm_v128_load(w);
    w = (const void*) ((const float*) w + 4);
    vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale0123);
    vacc1x0123 = wasm_f32x4_mul(vacc1x0123, vscale0123);
    vacc2x0123 = wasm_f32x4_mul(vacc2x0123, vscale0123);

    const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
    vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias);
    vacc1x0123 = wasm_f32x4_add(vacc1x0123, vmagic_bias);
    vacc2x0123 = wasm_f32x4_add(vacc2x0123, vmagic_bias);

    const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
    vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min);
    vacc1x0123 = wasm_i32x4_max(vacc1x0123, vmagic_min);
    vacc2x0123 = wasm_i32x4_max(vacc2x0123, vmagic_min);

    const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
    vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point);
    vacc1x0123 = wasm_i32x4_sub(vacc1x0123, vmagic_bias_less_output_zero_point);
    vacc2x0123 = wasm_i32x4_sub(vacc2x0123, vmagic_bias_less_output_zero_point);

    v128_t vacc01x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc1x0123);
    v128_t vacc22x0123 = wasm_i16x8_narrow_i32x4(vacc2x0123, vacc2x0123);

    v128_t vout = wasm_i8x16_narrow_i16x8(vacc01x0123, vacc22x0123);

    const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
    vout = wasm_i8x16_min(vout, voutput_max);

    if (nc >= 4) {
      wasm_v128_store32_lane(c0, vout, 0);
      wasm_v128_store32_lane(c1, vout, 1);
      wasm_v128_store32_lane(c2, vout, 2);

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);

      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);

      nc -= 4;
    } else {
      if (nc & 2) {
        wasm_v128_store16_lane(c0, vout, 0);
        c0 += 2;
        wasm_v128_store16_lane(c1, vout, 2);
        c1 += 2;
        wasm_v128_store16_lane(c2, vout, 4);
        c2 += 2;
        vout = wasm_u32x4_shr(vout, 16);
      }
      if (nc & 1) {
        wasm_v128_store8_lane(c0, vout, 0);
        wasm_v128_store8_lane(c1, vout, 4);
        wasm_v128_store8_lane(c2, vout, 8);
      }

      nc = 0;
    }
  } while (nc != 0);
}
8,276
36.96789
134
c
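Unlike the SSE4 kernels, the wasmsimd variants round with a magic-bias trick instead of a float-to-int conversion: adding the bias places round-to-nearest-even of the value in the low mantissa bits, the i32x4_max against magic_min doubles as the lower output clamp, and subtracting magic_bias_less_output_zero_point strips the bias bits while adding the zero point. A scalar sketch, assuming the bias is 12582912.0f (bit pattern 0x4B400000); treat the constants as illustrative:

#include <stdint.h>
#include <string.h>

// Magic-bias round + zero-point adjust for one lane (illustrative).
static inline int32_t round_magic(float x, int32_t zero_point) {
  float biased = x + 12582912.0f;  // 0x1.8p23f: low mantissa bits now hold round(x)
  int32_t bits;
  memcpy(&bits, &biased, sizeof(bits));     // reinterpret the bits, not convert
  return bits - (0x4B400000 - zero_point);  // strip the bias, add the zero point
}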
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-3x4c2-minmax-fp32-xop-ld128.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-gemm/MRx4c2-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#ifdef _MSC_VER
  #include <intrin.h>
#else
  #include <x86intrin.h>
#endif

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>


void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_3x4c2__xop_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  kc = round_up_po2(kc, 2 * sizeof(int8_t));
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }

  do {
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    __m128i vacc1x0123 = vacc0x0123;
    __m128i vacc2x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);

    size_t k = kc;
    while (k >= 8 * sizeof(int8_t)) {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
      a0 += 8;
      const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
      const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
      a1 += 8;
      const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
      const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
      a2 += 8;

      const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
      const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
      const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);

      vacc0x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc0x0123);
      vacc1x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc1x0123);
      vacc2x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc2x0123);

      vacc0x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc0x0123);
      vacc1x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc1x0123);
      vacc2x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc2x0123);
      const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16));
      const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
      const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);

      vacc0x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc0x0123);
      vacc1x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc1x0123);
      vacc2x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc2x0123);

      vacc0x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc0x0123);
      vacc1x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc1x0123);
      vacc2x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc2x0123);

      w = (const void*) ((const int8_t*) w + 32);
      k -= 8 * sizeof(int8_t);
    }
    if (k != 0) {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
      a0 = (const int8_t*) ((uintptr_t) a0 + k);
      const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
      const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
      a1 = (const int8_t*) ((uintptr_t) a1 + k);
      const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
      const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
      a2 = (const int8_t*) ((uintptr_t) a2 + k);

      const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
      const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
      w = (const void*) ((const int8_t*) w + 8);

      vacc0x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc0x0123);
      vacc1x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc1x0123);
      vacc2x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc2x0123);

      if (k > 2 * sizeof(int8_t)) {
        const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
        const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
        w = (const void*) ((const int8_t*) w + 8);

        vacc0x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc0x0123);
        vacc1x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc1x0123);
        vacc2x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc2x0123);

        if (k > 4 * sizeof(int8_t)) {
          const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
          const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
          w = (const void*) ((const int8_t*) w + 8);

          vacc0x0123 = _mm_maddd_epi16(
            _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc0x0123);
          vacc1x0123 = _mm_maddd_epi16(
            _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc1x0123);
          vacc2x0123 = _mm_maddd_epi16(
            _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc2x0123);
        }
      }
    }

    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);

    const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
    w = (const void*) ((const float*) w + 4);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale0123);

    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);

    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);

    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);

    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc22x0123);

    vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));

    if (nc >= 4) {
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
      unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);

      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);

      nc -= 4;
    } else {
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c0 = (int8_t) _mm_extract_epi8(vout, 0);
        *c1 = (int8_t) _mm_extract_epi8(vout, 4);
        *c2 = (int8_t) _mm_extract_epi8(vout, 8);
      }

      nc = 0;
    }
  } while (nc != 0);
}
8,652
36.951754
108
c
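The XOP kernels differ from the SSE4 ones only in the inner update: _mm_maddd_epi16(a, b, acc) fuses the pairwise int16 multiply-add with the int32 accumulation, replacing the _mm_madd_epi16 + _mm_add_epi32 pair. One 32-bit lane of that fused update as a scalar sketch (hypothetical name):

#include <stdint.h>

// acc += a0*b0 + a1*b1, the per-lane effect of _mm_maddd_epi16 (illustrative).
static inline int32_t maddd_lane(int16_t a0, int16_t a1,
                                 int16_t b0, int16_t b1, int32_t acc) {
  return acc + (int32_t) a0 * b0 + (int32_t) a1 * b1;
}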
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-3x4c2-minmax-fp32-xop-ld64.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-gemm/MRx4c2-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#ifdef _MSC_VER
  #include <intrin.h>
#else
  #include <x86intrin.h>
#endif

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>


void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_3x4c2__xop_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  kc = round_up_po2(kc, 2 * sizeof(int8_t));
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }

  do {
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    __m128i vacc1x0123 = vacc0x0123;
    __m128i vacc2x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);

    size_t k = kc;
    while (k >= 8 * sizeof(int8_t)) {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
      a0 += 8;
      const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
      const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
      a1 += 8;
      const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
      const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
      a2 += 8;

      const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
      const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);

      vacc0x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc0x0123);
      vacc1x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc1x0123);
      vacc2x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc2x0123);
      const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
      const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);

      vacc0x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc0x0123);
      vacc1x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc1x0123);
      vacc2x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc2x0123);
      const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
      const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);

      vacc0x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc0x0123);
      vacc1x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc1x0123);
      vacc2x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc2x0123);
      const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
      const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);

      vacc0x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc0x0123);
      vacc1x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc1x0123);
      vacc2x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc2x0123);

      w = (const void*) ((const int8_t*) w + 32);
      k -= 8 * sizeof(int8_t);
    }
    if (k != 0) {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
      a0 = (const int8_t*) ((uintptr_t) a0 + k);
      const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
      const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
      a1 = (const int8_t*) ((uintptr_t) a1 + k);
      const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
      const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
      a2 = (const int8_t*) ((uintptr_t) a2 + k);

      const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
      const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
      w = (const void*) ((const int8_t*) w + 8);

      vacc0x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc0x0123);
      vacc1x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc1x0123);
      vacc2x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc2x0123);

      if (k > 2 * sizeof(int8_t)) {
        const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
        const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
        w = (const void*) ((const int8_t*) w + 8);

        vacc0x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc0x0123);
        vacc1x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc1x0123);
        vacc2x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc2x0123);

        if (k > 4 * sizeof(int8_t)) {
          const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
          const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
          w = (const void*) ((const int8_t*) w + 8);

          vacc0x0123 = _mm_maddd_epi16(
            _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc0x0123);
          vacc1x0123 = _mm_maddd_epi16(
            _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc1x0123);
          vacc2x0123 = _mm_maddd_epi16(
            _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc2x0123);
        }
      }
    }

    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);

    const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
    w = (const void*) ((const float*) w + 4);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale0123);

    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);

    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);

    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);

    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc22x0123);

    vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));

    if (nc >= 4) {
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
      unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);

      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);

      nc -= 4;
    } else {
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c0 = (int8_t) _mm_extract_epi8(vout, 0);
        *c1 = (int8_t) _mm_extract_epi8(vout, 4);
        *c2 = (int8_t) _mm_extract_epi8(vout, 8);
      }

      nc = 0;
    }
  } while (nc != 0);
}
8,766
37.117391
108
c
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-3x4c2s4-minmax-fp32-avx-ld128.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-gemm/MRx4c2s4-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <smmintrin.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>


void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_3x4c2s4__avx_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }

  do {
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    __m128i vacc1x0123 = vacc0x0123;
    __m128i vacc2x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);

    size_t k = kc;
    do {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      __m128i vxa0 = _mm_cvtepi8_epi16(va0);
      a0 += 8;
      const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
      __m128i vxa1 = _mm_cvtepi8_epi16(va1);
      a1 += 8;
      const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
      __m128i vxa2 = _mm_cvtepi8_epi16(va2);
      a2 += 8;

      const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
      const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
      const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);

      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0));
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb0));
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));

      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1));
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb1));
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));

      const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16));
      const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
      const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);

      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2));
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb2));
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));

      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb3));

      w = (const void*) ((const int8_t*) w + 32);
      k -= 8 * sizeof(int8_t);
    } while (k != 0);

    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);

    const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
    w = (const void*) ((const float*) w + 4);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale0123);

    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);

    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);

    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);

    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc22x0123);

    vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));

    if (nc >= 4) {
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
      unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);

      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);

      nc -= 4;
    } else {
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c0 = (int8_t) _mm_extract_epi8(vout, 0);
        *c1 = (int8_t) _mm_extract_epi8(vout, 4);
        *c2 = (int8_t) _mm_extract_epi8(vout, 8);
      }

      nc = 0;
    }
  } while (nc != 0);
}
6,497
36.560694
108
c
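The c2s4 kernels above replace the per-pair broadcasts of the c2 kernels with a register rotation: after each _mm_madd_epi16 the activation register is shuffled by _MM_SHUFFLE(0, 3, 2, 1), moving the next int16 pair into position, and kc is rounded up to 8 bytes so no remainder path is needed. The rotation, modeled on a scalar array (illustrative):

#include <stdint.h>

// Rotate four int16 pairs (eight lanes) down by one 32-bit pair, wrapping
// the first pair to the end -- the effect of _MM_SHUFFLE(0, 3, 2, 1).
static void rotate_pairs(int16_t a[8]) {
  const int16_t t0 = a[0], t1 = a[1];
  for (int i = 0; i < 6; i++) {
    a[i] = a[i + 2];
  }
  a[6] = t0;
  a[7] = t1;
}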
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-3x4c2s4-minmax-fp32-avx-ld64.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-gemm/MRx4c2s4-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <smmintrin.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>


void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_3x4c2s4__avx_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }

  do {
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    __m128i vacc1x0123 = vacc0x0123;
    __m128i vacc2x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);

    size_t k = kc;
    do {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      __m128i vxa0 = _mm_cvtepi8_epi16(va0);
      a0 += 8;
      const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
      __m128i vxa1 = _mm_cvtepi8_epi16(va1);
      a1 += 8;
      const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
      __m128i vxa2 = _mm_cvtepi8_epi16(va2);
      a2 += 8;

      const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
      const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);

      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0));
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb0));
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
      const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);

      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1));
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb1));
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
      const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);

      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2));
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb2));
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
      const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);

      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb3));

      w = (const void*) ((const int8_t*) w + 32);
      k -= 8 * sizeof(int8_t);
    } while (k != 0);

    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);

    const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
    w = (const void*) ((const float*) w + 4);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale0123);

    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);

    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);

    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);

    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc22x0123);

    vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));

    if (nc >= 4) {
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
      unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);

      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);

      nc -= 4;
    } else {
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c0 = (int8_t) _mm_extract_epi8(vout, 0);
        *c1 = (int8_t) _mm_extract_epi8(vout, 4);
        *c2 = (int8_t) _mm_extract_epi8(vout, 8);
      }

      nc = 0;
    }
  } while (nc != 0);
}
6,611
36.782857
108
c
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-3x4c2s4-minmax-fp32-sse2-ld128.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-gemm/MRx4c2s4-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>


void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_3x4c2s4__sse2_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }

  do {
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    __m128i vacc1x0123 = vacc0x0123;
    __m128i vacc2x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);

    size_t k = kc;
    do {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
      a0 += 8;
      const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
      __m128i vxa1 = _mm_srai_epi16(_mm_unpacklo_epi8(va1, va1), 8);
      a1 += 8;
      const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
      __m128i vxa2 = _mm_srai_epi16(_mm_unpacklo_epi8(va2, va2), 8);
      a2 += 8;

      const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
      const __m128i vsb01 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb01);
      const __m128i vxb0 = _mm_unpacklo_epi8(vb01, vsb01);
      const __m128i vxb1 = _mm_unpackhi_epi8(vb01, vsb01);

      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0));
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb0));
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));

      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1));
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb1));
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));

      const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16));
      const __m128i vsb23 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb23);
      const __m128i vxb2 = _mm_unpacklo_epi8(vb23, vsb23);
      const __m128i vxb3 = _mm_unpackhi_epi8(vb23, vsb23);

      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2));
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb2));
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));

      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb3));

      w = (const void*) ((const int8_t*) w + 32);
      k -= 8 * sizeof(int8_t);
    } while (k != 0);

    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);

    const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
    w = (const void*) ((const float*) w + 4);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale0123);

    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);

    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);

    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);

    const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
    vacc01x0123 = _mm_max_epi16(vacc01x0123, voutput_min);
    vacc22x0123 = _mm_max_epi16(vacc22x0123, voutput_min);

    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc22x0123);

    if (nc >= 4) {
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      vout = _mm_shuffle_epi32(vout, _MM_SHUFFLE(0, 3, 2, 1));
      unaligned_store_u32(c1, (uint32_t) _mm_cvtsi128_si32(vout));
      vout = _mm_shuffle_epi32(vout, _MM_SHUFFLE(0, 3, 2, 1));
      unaligned_store_u32(c2, (uint32_t) _mm_cvtsi128_si32(vout));

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);

      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);

      nc -= 4;
    } else {
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c0 = (int8_t) _mm_cvtsi128_si32(vout);
        *c1 = (int8_t) _mm_extract_epi16(vout, 2);
        *c2 = (int8_t) _mm_extract_epi16(vout, 4);
      }

      nc = 0;
    }
  } while (nc != 0);
}
6,929
37.715084
108
c
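Lacking SSE4's pmovsxbw, the SSE2 variants sign-extend int8 to int16 two ways: activations (and ld64 weights) are widened by duplicating each byte into both halves of a 16-bit lane and arithmetic-shifting right by 8, while ld128 weights unpack against a sign mask built with _mm_cmpgt_epi8(zero, v). Both idioms for one element, as a scalar sketch (hedged: right shifts of negative values are arithmetic on the targets these kernels build for):

#include <stdint.h>

// unpacklo(v, v) then srai by 8: the high byte carries the sign (illustrative).
static inline int16_t widen_srai(int8_t x) {
  uint16_t dup = (uint16_t) (((uint8_t) x << 8) | (uint8_t) x);
  return (int16_t) ((int16_t) dup >> 8);
}

// cmpgt(0, v) yields 0xFF for negative bytes; unpacking against it widens.
static inline int16_t widen_mask(int8_t x) {
  const uint8_t sign = (x < 0) ? 0xFF : 0x00;
  return (int16_t) (uint16_t) (((uint16_t) sign << 8) | (uint8_t) x);
}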
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-3x4c2s4-minmax-fp32-sse2-ld64.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-gemm/MRx4c2s4-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>


void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_3x4c2s4__sse2_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }

  do {
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    __m128i vacc1x0123 = vacc0x0123;
    __m128i vacc2x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);

    size_t k = kc;
    do {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
      a0 += 8;
      const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
      __m128i vxa1 = _mm_srai_epi16(_mm_unpacklo_epi8(va1, va1), 8);
      a1 += 8;
      const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
      __m128i vxa2 = _mm_srai_epi16(_mm_unpacklo_epi8(va2, va2), 8);
      a2 += 8;

      const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
      const __m128i vxb0 = _mm_srai_epi16(_mm_unpacklo_epi8(vb0, vb0), 8);

      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0));
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb0));
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
      const __m128i vxb1 = _mm_srai_epi16(_mm_unpacklo_epi8(vb1, vb1), 8);

      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1));
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb1));
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
      const __m128i vxb2 = _mm_srai_epi16(_mm_unpacklo_epi8(vb2, vb2), 8);

      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2));
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb2));
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
      const __m128i vxb3 = _mm_srai_epi16(_mm_unpacklo_epi8(vb3, vb3), 8);

      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb3));

      w = (const void*) ((const int8_t*) w + 32);
      k -= 8 * sizeof(int8_t);
    } while (k != 0);

    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);

    const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
    w = (const void*) ((const float*) w + 4);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale0123);

    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);

    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);

    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);

    const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
    vacc01x0123 = _mm_max_epi16(vacc01x0123, voutput_min);
    vacc22x0123 = _mm_max_epi16(vacc22x0123, voutput_min);

    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc22x0123);

    if (nc >= 4) {
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      vout = _mm_shuffle_epi32(vout, _MM_SHUFFLE(0, 3, 2, 1));
      unaligned_store_u32(c1, (uint32_t) _mm_cvtsi128_si32(vout));
      vout = _mm_shuffle_epi32(vout, _MM_SHUFFLE(0, 3, 2, 1));
      unaligned_store_u32(c2, (uint32_t) _mm_cvtsi128_si32(vout));

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);

      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);

      nc -= 4;
    } else {
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c0 = (int8_t) _mm_cvtsi128_si32(vout);
        *c1 = (int8_t) _mm_extract_epi16(vout, 2);
        *c2 = (int8_t) _mm_extract_epi16(vout, 4);
      }

      nc = 0;
    }
  } while (nc != 0);
}
7,019
38.217877
108
c
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-3x4c2s4-minmax-fp32-sse41-ld128.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-gemm/MRx4c2s4-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <smmintrin.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>


void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_3x4c2s4__sse41_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }

  do {
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    __m128i vacc1x0123 = vacc0x0123;
    __m128i vacc2x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);

    size_t k = kc;
    do {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      __m128i vxa0 = _mm_cvtepi8_epi16(va0);
      a0 += 8;
      const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
      __m128i vxa1 = _mm_cvtepi8_epi16(va1);
      a1 += 8;
      const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
      __m128i vxa2 = _mm_cvtepi8_epi16(va2);
      a2 += 8;

      const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
      const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
      const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);

      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0));
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb0));
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));

      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1));
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb1));
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));

      const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16));
      const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
      const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);

      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2));
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb2));
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));

      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb3));

      w = (const void*) ((const int8_t*) w + 32);
      k -= 8 * sizeof(int8_t);
    } while (k != 0);

    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);

    const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
    w = (const void*) ((const float*) w + 4);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale0123);

    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);

    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);

    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);

    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc22x0123);

    vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));

    if (nc >= 4) {
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
      unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);

      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);

      nc -= 4;
    } else {
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c0 = (int8_t) _mm_extract_epi8(vout, 0);
        *c1 = (int8_t) _mm_extract_epi8(vout, 4);
        *c2 = (int8_t) _mm_extract_epi8(vout, 8);
      }

      nc = 0;
    }
  } while (nc != 0);
}
6,499
36.572254
108
c
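The records above and below are all variants of one quantized GEMM contract: int8 activations times per-channel-quantized int8 weights, accumulated in int32 starting from a bias, requantized through a per-channel fp32 scale, clamped, and stored as int8. A minimal scalar sketch of that contract follows; it is illustrative only (not part of XNNPACK), and the helper name and flat argument layout are assumptions made for clarity.

#include <math.h>
#include <stddef.h>
#include <stdint.h>

// Reference for one output element: acc = bias + dot(a, b) over kc int8
// inputs, then fp32 requantization as in the fp32_sse4 path above.
static int8_t qs8_qc8w_ref(
    const int8_t* a, const int8_t* b, size_t kc,
    int32_t bias, float scale,
    int8_t out_min, int8_t out_max, int8_t out_zero_point)
{
  int32_t acc = bias;
  for (size_t k = 0; k < kc; k++) {
    acc += (int32_t) a[k] * (int32_t) b[k];  // widening int8*int8 -> int32
  }
  float scaled = (float) acc * scale;  // per-channel scale (the "qc8w" part)
  // Clamp in the zero-point-relative domain, mirroring the kernels'
  // output_max_less_zero_point min and post-pack output_min max.
  scaled = fminf(scaled, (float) out_max - (float) out_zero_point);
  scaled = fmaxf(scaled, (float) out_min - (float) out_zero_point);
  // lrintf rounds to nearest-even under the default mode, like _mm_cvtps_epi32.
  return (int8_t) ((int32_t) lrintf(scaled) + out_zero_point);
}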
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-3x4c2s4-minmax-fp32-sse41-ld64.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-gemm/MRx4c2s4-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <smmintrin.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>


void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_3x4c2s4__sse41_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }

  do {
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    __m128i vacc1x0123 = vacc0x0123;
    __m128i vacc2x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);

    size_t k = kc;
    do {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      __m128i vxa0 = _mm_cvtepi8_epi16(va0);
      a0 += 8;
      const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
      __m128i vxa1 = _mm_cvtepi8_epi16(va1);
      a1 += 8;
      const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
      __m128i vxa2 = _mm_cvtepi8_epi16(va2);
      a2 += 8;

      const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
      const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);

      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0));
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb0));
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));

      const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
      const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);

      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1));
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb1));
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));

      const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
      const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);

      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2));
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb2));
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));

      const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
      const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);

      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
      vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3));
      vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb3));

      w = (const void*) ((const int8_t*) w + 32);
      k -= 8 * sizeof(int8_t);
    } while (k != 0);

    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);

    const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
    w = (const void*) ((const float*) w + 4);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale0123);

    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);

    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);

    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);

    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc22x0123);

    vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));

    if (nc >= 4) {
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
      unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);

      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);

      nc -= 4;
    } else {
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c0 = (int8_t) _mm_extract_epi8(vout, 0);
        *c1 = (int8_t) _mm_extract_epi8(vout, 4);
        *c2 = (int8_t) _mm_extract_epi8(vout, 8);
      }

      nc = 0;
    }
  } while (nc != 0);
}
6,613
36.794286
108
c
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-3x4c2s4-minmax-fp32-wasmsimd-dot16x2-ld128.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-gemm/MRx4c2s4-wasmsimd-dot16x2.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>


void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_3x4c2s4__wasmsimd_dot16x2_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }

  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  do {
    v128_t vacc0x0123 = wasm_v128_load(w);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc2x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);

    size_t k = kc;
    do {
      v128_t vxa0 = wasm_i16x8_load8x8((const v128_t*) a0);
      a0 += 8;
      v128_t vxa1 = wasm_i16x8_load8x8((const v128_t*) a1);
      a1 += 8;
      v128_t vxa2 = wasm_i16x8_load8x8((const v128_t*) a2);
      a2 += 8;

      const v128_t vb01 = wasm_v128_load(w);
      const v128_t vxb0 = wasm_i16x8_extend_low_i8x16(vb01);
      const v128_t vxb1 = wasm_i16x8_extend_high_i8x16(vb01);

      vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb0));
      vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
      vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb0));
      vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
      vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb0));
      vxa2 = wasm_v32x4_shuffle(vxa2, vxa2, 1, 2, 3, 4);
      vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb1));
      vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
      vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb1));
      vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
      vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb1));
      vxa2 = wasm_v32x4_shuffle(vxa2, vxa2, 1, 2, 3, 4);

      const v128_t vb23 = wasm_v128_load((const int8_t*) w + 16);
      const v128_t vxb2 = wasm_i16x8_extend_low_i8x16(vb23);
      const v128_t vxb3 = wasm_i16x8_extend_high_i8x16(vb23);

      vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb2));
      vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
      vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb2));
      vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
      vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb2));
      vxa2 = wasm_v32x4_shuffle(vxa2, vxa2, 1, 2, 3, 4);
      vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb3));
      vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb3));
      vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb3));

      w = (const int8_t*) w + 32;
      k -= 8 * sizeof(int8_t);
    } while (k != 0);

    vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123);
    vacc1x0123 = wasm_f32x4_convert_i32x4(vacc1x0123);
    vacc2x0123 = wasm_f32x4_convert_i32x4(vacc2x0123);

    const v128_t vscale0123 = wasm_v128_load(w);
    w = (const float*) w + 4;
    vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale0123);
    vacc1x0123 = wasm_f32x4_mul(vacc1x0123, vscale0123);
    vacc2x0123 = wasm_f32x4_mul(vacc2x0123, vscale0123);

    const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
    vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias);
    vacc1x0123 = wasm_f32x4_add(vacc1x0123, vmagic_bias);
    vacc2x0123 = wasm_f32x4_add(vacc2x0123, vmagic_bias);

    const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
    vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min);
    vacc1x0123 = wasm_i32x4_max(vacc1x0123, vmagic_min);
    vacc2x0123 = wasm_i32x4_max(vacc2x0123, vmagic_min);

    const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
    vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point);
    vacc1x0123 = wasm_i32x4_sub(vacc1x0123, vmagic_bias_less_output_zero_point);
    vacc2x0123 = wasm_i32x4_sub(vacc2x0123, vmagic_bias_less_output_zero_point);

    v128_t vacc01x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc1x0123);
    v128_t vacc22x0123 = wasm_i16x8_narrow_i32x4(vacc2x0123, vacc2x0123);

    v128_t vout = wasm_i8x16_narrow_i16x8(vacc01x0123, vacc22x0123);

    const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
    vout = wasm_i8x16_min(vout, voutput_max);

    if (nc >= 4) {
      wasm_v128_store32_lane(c0, vout, 0);
      wasm_v128_store32_lane(c1, vout, 1);
      wasm_v128_store32_lane(c2, vout, 2);

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);

      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);

      nc -= 4;
    } else {
      if (nc & 2) {
        wasm_v128_store16_lane(c0, vout, 0);
        c0 += 2;
        wasm_v128_store16_lane(c1, vout, 2);
        c1 += 2;
        wasm_v128_store16_lane(c2, vout, 4);
        c2 += 2;
        vout = wasm_u32x4_shr(vout, 16);
      }
      if (nc & 1) {
        wasm_v128_store8_lane(c0, vout, 0);
        wasm_v128_store8_lane(c1, vout, 4);
        wasm_v128_store8_lane(c2, vout, 8);
      }

      nc = 0;
    }
  } while (nc != 0);
}
6,374
35.428571
134
c
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-3x4c2s4-minmax-fp32-wasmsimd-dot16x2-ld64.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-gemm/MRx4c2s4-wasmsimd-dot16x2.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>


void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_3x4c2s4__wasmsimd_dot16x2_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }

  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  do {
    v128_t vacc0x0123 = wasm_v128_load(w);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc2x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);

    size_t k = kc;
    do {
      v128_t vxa0 = wasm_i16x8_load8x8((const v128_t*) a0);
      a0 += 8;
      v128_t vxa1 = wasm_i16x8_load8x8((const v128_t*) a1);
      a1 += 8;
      v128_t vxa2 = wasm_i16x8_load8x8((const v128_t*) a2);
      a2 += 8;

      const v128_t vxb0 = wasm_i16x8_load8x8(w);

      vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb0));
      vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
      vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb0));
      vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
      vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb0));
      vxa2 = wasm_v32x4_shuffle(vxa2, vxa2, 1, 2, 3, 4);

      const v128_t vxb1 = wasm_i16x8_load8x8((const int8_t*) w + 8);

      vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb1));
      vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
      vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb1));
      vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
      vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb1));
      vxa2 = wasm_v32x4_shuffle(vxa2, vxa2, 1, 2, 3, 4);

      const v128_t vxb2 = wasm_i16x8_load8x8((const int8_t*) w + 16);

      vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb2));
      vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
      vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb2));
      vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
      vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb2));
      vxa2 = wasm_v32x4_shuffle(vxa2, vxa2, 1, 2, 3, 4);

      const v128_t vxb3 = wasm_i16x8_load8x8((const int8_t*) w + 24);

      vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb3));
      vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb3));
      vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb3));

      w = (const int8_t*) w + 32;
      k -= 8 * sizeof(int8_t);
    } while (k != 0);

    vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123);
    vacc1x0123 = wasm_f32x4_convert_i32x4(vacc1x0123);
    vacc2x0123 = wasm_f32x4_convert_i32x4(vacc2x0123);

    const v128_t vscale0123 = wasm_v128_load(w);
    w = (const float*) w + 4;
    vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale0123);
    vacc1x0123 = wasm_f32x4_mul(vacc1x0123, vscale0123);
    vacc2x0123 = wasm_f32x4_mul(vacc2x0123, vscale0123);

    const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
    vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias);
    vacc1x0123 = wasm_f32x4_add(vacc1x0123, vmagic_bias);
    vacc2x0123 = wasm_f32x4_add(vacc2x0123, vmagic_bias);

    const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
    vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min);
    vacc1x0123 = wasm_i32x4_max(vacc1x0123, vmagic_min);
    vacc2x0123 = wasm_i32x4_max(vacc2x0123, vmagic_min);

    const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
    vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point);
    vacc1x0123 = wasm_i32x4_sub(vacc1x0123, vmagic_bias_less_output_zero_point);
    vacc2x0123 = wasm_i32x4_sub(vacc2x0123, vmagic_bias_less_output_zero_point);

    v128_t vacc01x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc1x0123);
    v128_t vacc22x0123 = wasm_i16x8_narrow_i32x4(vacc2x0123, vacc2x0123);

    v128_t vout = wasm_i8x16_narrow_i16x8(vacc01x0123, vacc22x0123);

    const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
    vout = wasm_i8x16_min(vout, voutput_max);

    if (nc >= 4) {
      wasm_v128_store32_lane(c0, vout, 0);
      wasm_v128_store32_lane(c1, vout, 1);
      wasm_v128_store32_lane(c2, vout, 2);

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);

      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);

      nc -= 4;
    } else {
      if (nc & 2) {
        wasm_v128_store16_lane(c0, vout, 0);
        c0 += 2;
        wasm_v128_store16_lane(c1, vout, 2);
        c1 += 2;
        wasm_v128_store16_lane(c2, vout, 4);
        c2 += 2;
        vout = wasm_u32x4_shr(vout, 16);
      }
      if (nc & 1) {
        wasm_v128_store8_lane(c0, vout, 0);
        wasm_v128_store8_lane(c1, vout, 4);
        wasm_v128_store8_lane(c2, vout, 8);
      }

      nc = 0;
    }
  } while (nc != 0);
}
6,274
35.271676
134
c
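The two wasmsimd records above requantize without a float-to-int conversion instruction, using the magic-bias trick: adding 1.5·2^23 pins the float's exponent so that round-to-nearest happens in the mantissa bits, after which the integer bit pattern can be adjusted directly. A scalar sketch of the idea follows; it is illustrative only, and folds the constants the way the fp32_wasmsimd params names suggest (that layout is an assumption here).

#include <stdint.h>
#include <string.h>

// Requantize one already-scaled float the way the wasm kernels do.
// The upper clamp is intentionally absent: in the kernels it happens after
// narrowing, via wasm_i8x16_min, so x is assumed to be in range above.
static int8_t magic_bias_requant(float x, int8_t out_min, int8_t out_zp) {
  const float fmagic = 12582912.0f;   // 0x1.8p+23, the magic_bias
  const int32_t imagic = 0x4B400000;  // bit pattern of fmagic
  float biased = x + fmagic;          // rounds x to nearest int in the mantissa
  int32_t bits;
  memcpy(&bits, &biased, sizeof bits);  // reinterpret the bits, not convert
  // Integer max replaces the lower float clamp (the wasm_i32x4_max above).
  const int32_t lo = imagic + ((int32_t) out_min - (int32_t) out_zp);
  if (bits < lo) bits = lo;
  // Subtracting magic_bias_less_output_zero_point yields round(x) + zero point.
  return (int8_t) (bits - (imagic - (int32_t) out_zp));
}

Because the rounded result lives in the low mantissa bits, the zero-point add and the lower clamp both become cheap integer operations on the bit pattern, which is why no cvt instruction is needed.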
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-3x4c2s4-minmax-fp32-xop-ld128.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-gemm/MRx4c2s4-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#ifdef _MSC_VER
  #include <intrin.h>
#else
  #include <x86intrin.h>
#endif

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>


void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_3x4c2s4__xop_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }

  do {
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    __m128i vacc1x0123 = vacc0x0123;
    __m128i vacc2x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);

    size_t k = kc;
    do {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      __m128i vxa0 = _mm_cvtepi8_epi16(va0);
      a0 += 8;
      const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
      __m128i vxa1 = _mm_cvtepi8_epi16(va1);
      a1 += 8;
      const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
      __m128i vxa2 = _mm_cvtepi8_epi16(va2);
      a2 += 8;

      const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
      const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
      const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);

      vacc0x0123 = _mm_maddd_epi16(vxa0, vxb0, vacc0x0123);
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_maddd_epi16(vxa1, vxb0, vacc1x0123);
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_maddd_epi16(vxa2, vxb0, vacc2x0123);
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
      vacc0x0123 = _mm_maddd_epi16(vxa0, vxb1, vacc0x0123);
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_maddd_epi16(vxa1, vxb1, vacc1x0123);
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_maddd_epi16(vxa2, vxb1, vacc2x0123);
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));

      const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16));
      const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
      const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);

      vacc0x0123 = _mm_maddd_epi16(vxa0, vxb2, vacc0x0123);
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_maddd_epi16(vxa1, vxb2, vacc1x0123);
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_maddd_epi16(vxa2, vxb2, vacc2x0123);
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
      vacc0x0123 = _mm_maddd_epi16(vxa0, vxb3, vacc0x0123);
      vacc1x0123 = _mm_maddd_epi16(vxa1, vxb3, vacc1x0123);
      vacc2x0123 = _mm_maddd_epi16(vxa2, vxb3, vacc2x0123);

      w = (const void*) ((const int8_t*) w + 32);
      k -= 8 * sizeof(int8_t);
    } while (k != 0);

    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);

    const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
    w = (const void*) ((const float*) w + 4);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale0123);

    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);

    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);

    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);

    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc22x0123);

    vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));

    if (nc >= 4) {
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
      unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);

      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);

      nc -= 4;
    } else {
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c0 = (int8_t) _mm_extract_epi8(vout, 0);
        *c1 = (int8_t) _mm_extract_epi8(vout, 4);
        *c2 = (int8_t) _mm_extract_epi8(vout, 8);
      }

      nc = 0;
    }
  } while (nc != 0);
}
6,382
35.062147
108
c
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-3x4c2s4-minmax-fp32-xop-ld64.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-gemm/MRx4c2s4-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#ifdef _MSC_VER
  #include <intrin.h>
#else
  #include <x86intrin.h>
#endif

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>


void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_3x4c2s4__xop_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }

  do {
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    __m128i vacc1x0123 = vacc0x0123;
    __m128i vacc2x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);

    size_t k = kc;
    do {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      __m128i vxa0 = _mm_cvtepi8_epi16(va0);
      a0 += 8;
      const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
      __m128i vxa1 = _mm_cvtepi8_epi16(va1);
      a1 += 8;
      const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
      __m128i vxa2 = _mm_cvtepi8_epi16(va2);
      a2 += 8;

      const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
      const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);

      vacc0x0123 = _mm_maddd_epi16(vxa0, vxb0, vacc0x0123);
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_maddd_epi16(vxa1, vxb0, vacc1x0123);
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_maddd_epi16(vxa2, vxb0, vacc2x0123);
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));

      const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
      const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);

      vacc0x0123 = _mm_maddd_epi16(vxa0, vxb1, vacc0x0123);
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_maddd_epi16(vxa1, vxb1, vacc1x0123);
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_maddd_epi16(vxa2, vxb1, vacc2x0123);
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));

      const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
      const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);

      vacc0x0123 = _mm_maddd_epi16(vxa0, vxb2, vacc0x0123);
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc1x0123 = _mm_maddd_epi16(vxa1, vxb2, vacc1x0123);
      vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
      vacc2x0123 = _mm_maddd_epi16(vxa2, vxb2, vacc2x0123);
      vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));

      const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
      const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);

      vacc0x0123 = _mm_maddd_epi16(vxa0, vxb3, vacc0x0123);
      vacc1x0123 = _mm_maddd_epi16(vxa1, vxb3, vacc1x0123);
      vacc2x0123 = _mm_maddd_epi16(vxa2, vxb3, vacc2x0123);

      w = (const void*) ((const int8_t*) w + 32);
      k -= 8 * sizeof(int8_t);
    } while (k != 0);

    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);

    const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
    w = (const void*) ((const float*) w + 4);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale0123);

    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);

    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);

    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);

    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc22x0123);

    vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));

    if (nc >= 4) {
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
      unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);

      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);

      nc -= 4;
    } else {
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c0 = (int8_t) _mm_extract_epi8(vout, 0);
        *c1 = (int8_t) _mm_extract_epi8(vout, 4);
        *c2 = (int8_t) _mm_extract_epi8(vout, 8);
      }

      nc = 0;
    }
  } while (nc != 0);
}
6,496
35.296089
108
c
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-3x4c8-minmax-fp32-avx-ld128.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-gemm/MRx4c8-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#ifdef _MSC_VER
  #include <intrin.h>
#else
  #include <x86intrin.h>
#endif

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>


void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_3x4c8__avx_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }

  do {
    __m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
    __m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
    __m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
    __m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
    __m128i vacc1x0 = vacc0x0;
    __m128i vacc1x1 = vacc0x1;
    __m128i vacc1x2 = vacc0x2;
    __m128i vacc1x3 = vacc0x3;
    __m128i vacc2x0 = vacc0x0;
    __m128i vacc2x1 = vacc0x1;
    __m128i vacc2x2 = vacc0x2;
    __m128i vacc2x3 = vacc0x3;
    w = (const int32_t*) w + 4;

    size_t k = 0;
    while (k < kc) {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
      a0 += 8;
      const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
      const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
      a1 += 8;
      const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
      const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
      a2 += 8;

      const __m128i vb01 = _mm_load_si128((const __m128i*) w);
      const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
      const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);

      vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
      vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
      vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0));
      vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1));
      vacc2x0 = _mm_add_epi32(vacc2x0, _mm_madd_epi16(vxa2, vxb0));
      vacc2x1 = _mm_add_epi32(vacc2x1, _mm_madd_epi16(vxa2, vxb1));

      const __m128i vb23 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 16));
      const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
      const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);

      vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
      vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
      vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2));
      vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3));
      vacc2x2 = _mm_add_epi32(vacc2x2, _mm_madd_epi16(vxa2, vxb2));
      vacc2x3 = _mm_add_epi32(vacc2x3, _mm_madd_epi16(vxa2, vxb3));

      w = (const void*) ((const int8_t*) w + 32);
      k += 8 * sizeof(int8_t);
    }

    const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
    const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
    const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1);
    const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3);
    const __m128i vacc2x01 = _mm_hadd_epi32(vacc2x0, vacc2x1);
    const __m128i vacc2x23 = _mm_hadd_epi32(vacc2x2, vacc2x3);

    __m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
    __m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23);
    __m128i vacc2x0123 = _mm_hadd_epi32(vacc2x01, vacc2x23);

    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);

    const __m128 vscale0123 = _mm_load_ps((const float*) w);
    w = (const void*) ((const float*) w + 4);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale0123);

    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);

    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);

    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);

    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc22x0123);

    vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));

    if (nc >= 4) {
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
      unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);

      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);

      nc -= 4;
    } else {
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c0 = (int8_t) _mm_extract_epi8(vout, 0);
        *c1 = (int8_t) _mm_extract_epi8(vout, 4);
        *c2 = (int8_t) _mm_extract_epi8(vout, 8);
      }

      nc = 0;
    }
  } while (nc != 0);
}
6,821
35.875676
108
c
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-3x4c8-minmax-fp32-avx-ld64.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-gemm/MRx4c8-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#ifdef _MSC_VER
  #include <intrin.h>
#else
  #include <x86intrin.h>
#endif

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>


void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_3x4c8__avx_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }

  do {
    __m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
    __m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
    __m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
    __m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
    __m128i vacc1x0 = vacc0x0;
    __m128i vacc1x1 = vacc0x1;
    __m128i vacc1x2 = vacc0x2;
    __m128i vacc1x3 = vacc0x3;
    __m128i vacc2x0 = vacc0x0;
    __m128i vacc2x1 = vacc0x1;
    __m128i vacc2x2 = vacc0x2;
    __m128i vacc2x3 = vacc0x3;
    w = (const int32_t*) w + 4;

    size_t k = 0;
    while (k < kc) {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
      a0 += 8;
      const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
      const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
      a1 += 8;
      const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
      const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
      a2 += 8;

      const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
      const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);

      vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
      vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0));
      vacc2x0 = _mm_add_epi32(vacc2x0, _mm_madd_epi16(vxa2, vxb0));

      const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
      const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);

      vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
      vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1));
      vacc2x1 = _mm_add_epi32(vacc2x1, _mm_madd_epi16(vxa2, vxb1));

      const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
      const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);

      vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
      vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2));
      vacc2x2 = _mm_add_epi32(vacc2x2, _mm_madd_epi16(vxa2, vxb2));

      const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
      const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);

      vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
      vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3));
      vacc2x3 = _mm_add_epi32(vacc2x3, _mm_madd_epi16(vxa2, vxb3));

      w = (const void*) ((const int8_t*) w + 32);
      k += 8 * sizeof(int8_t);
    }

    const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
    const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
    const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1);
    const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3);
    const __m128i vacc2x01 = _mm_hadd_epi32(vacc2x0, vacc2x1);
    const __m128i vacc2x23 = _mm_hadd_epi32(vacc2x2, vacc2x3);

    __m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
    __m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23);
    __m128i vacc2x0123 = _mm_hadd_epi32(vacc2x01, vacc2x23);

    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);

    const __m128 vscale0123 = _mm_load_ps((const float*) w);
    w = (const void*) ((const float*) w + 4);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale0123);

    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);

    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);

    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);

    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc22x0123);

    vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));

    if (nc >= 4) {
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
      unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);

      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);

      nc -= 4;
    } else {
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c0 = (int8_t) _mm_extract_epi8(vout, 0);
        *c1 = (int8_t) _mm_extract_epi8(vout, 4);
        *c2 = (int8_t) _mm_extract_epi8(vout, 8);
      }

      nc = 0;
    }
  } while (nc != 0);
}
6,939
35.719577
108
c
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-3x4c8-minmax-fp32-sse2-ld128.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-gemm/MRx4c8-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>


void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_3x4c8__sse2_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }

  do {
    __m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
    __m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
    __m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
    __m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
    __m128i vacc1x0 = vacc0x0;
    __m128i vacc1x1 = vacc0x1;
    __m128i vacc1x2 = vacc0x2;
    __m128i vacc1x3 = vacc0x3;
    __m128i vacc2x0 = vacc0x0;
    __m128i vacc2x1 = vacc0x1;
    __m128i vacc2x2 = vacc0x2;
    __m128i vacc2x3 = vacc0x3;
    w = (const int32_t*) w + 4;

    size_t k = 0;
    while (k < kc) {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
      a0 += 8;
      const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
      const __m128i vxa1 = _mm_srai_epi16(_mm_unpacklo_epi8(va1, va1), 8);
      a1 += 8;
      const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
      const __m128i vxa2 = _mm_srai_epi16(_mm_unpacklo_epi8(va2, va2), 8);
      a2 += 8;

      const __m128i vb01 = _mm_load_si128((const __m128i*) w);
      const __m128i vsb01 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb01);
      const __m128i vxb0 = _mm_unpacklo_epi8(vb01, vsb01);
      const __m128i vxb1 = _mm_unpackhi_epi8(vb01, vsb01);

      vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
      vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
      vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0));
      vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1));
      vacc2x0 = _mm_add_epi32(vacc2x0, _mm_madd_epi16(vxa2, vxb0));
      vacc2x1 = _mm_add_epi32(vacc2x1, _mm_madd_epi16(vxa2, vxb1));

      const __m128i vb23 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 16));
      const __m128i vsb23 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb23);
      const __m128i vxb2 = _mm_unpacklo_epi8(vb23, vsb23);
      const __m128i vxb3 = _mm_unpackhi_epi8(vb23, vsb23);

      vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
      vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
      vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2));
      vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3));
      vacc2x2 = _mm_add_epi32(vacc2x2, _mm_madd_epi16(vxa2, vxb2));
      vacc2x3 = _mm_add_epi32(vacc2x3, _mm_madd_epi16(vxa2, vxb3));

      w = (const void*) ((const int8_t*) w + 32);
      k += 8 * sizeof(int8_t);
    }

    const __m128i vacc0x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x0, vacc0x2), _mm_unpackhi_epi32(vacc0x0, vacc0x2));
    const __m128i vacc0x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x1, vacc0x3), _mm_unpackhi_epi32(vacc0x1, vacc0x3));
    const __m128i vacc1x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc1x0, vacc1x2), _mm_unpackhi_epi32(vacc1x0, vacc1x2));
    const __m128i vacc1x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc1x1, vacc1x3), _mm_unpackhi_epi32(vacc1x1, vacc1x3));
    const __m128i vacc2x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc2x0, vacc2x2), _mm_unpackhi_epi32(vacc2x0, vacc2x2));
    const __m128i vacc2x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc2x1, vacc2x3), _mm_unpackhi_epi32(vacc2x1, vacc2x3));

    __m128i vacc0x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x02, vacc0x13), _mm_unpackhi_epi32(vacc0x02, vacc0x13));
    __m128i vacc1x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc1x02, vacc1x13), _mm_unpackhi_epi32(vacc1x02, vacc1x13));
    __m128i vacc2x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc2x02, vacc2x13), _mm_unpackhi_epi32(vacc2x02, vacc2x13));

    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);

    const __m128 vscale0123 = _mm_load_ps((const float*) w);
    w = (const void*) ((const float*) w + 4);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale0123);

    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);

    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);

    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);

    const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
    vacc01x0123 = _mm_max_epi16(vacc01x0123, voutput_min);
    vacc22x0123 = _mm_max_epi16(vacc22x0123, voutput_min);

    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc22x0123);

    if (nc >= 4) {
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      vout = _mm_srli_si128(vout, 4);
      unaligned_store_u32(c1, (uint32_t) _mm_cvtsi128_si32(vout));
      vout = _mm_srli_si128(vout, 4);
      unaligned_store_u32(c2, (uint32_t) _mm_cvtsi128_si32(vout));

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);

      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);

      nc -= 4;
    } else {
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c0 = (int8_t) _mm_cvtsi128_si32(vout);
        *c1 = (int8_t) _mm_extract_epi16(vout, 2);
        *c2 = (int8_t) _mm_extract_epi16(vout, 4);
      }

      nc = 0;
    }
  } while (nc != 0);
}
7,669
40.016043
119
c
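The sse2 records above predate _mm_cvtepi8_epi16 (SSE4.1), so they widen int8 lanes to int16 in two ways: duplicating each byte into both halves of a 16-bit lane and arithmetic-shifting right (the activations, and the ld64 weights), or unpacking against a computed byte-wise sign mask (the ld128 weights). A scalar sketch of both equivalences, with hypothetical helper names, assuming arithmetic right shift on signed values:

#include <stdint.h>

// Matches _mm_srai_epi16(_mm_unpacklo_epi8(x, x), 8): the source byte lands
// in the high half of the 16-bit lane and the arithmetic shift drags its
// sign bit back down.
static int16_t widen_by_shift(uint8_t byte) {
  uint16_t lane = (uint16_t) ((byte << 8) | byte);  // unpack the byte against itself
  return (int16_t) ((int16_t) lane >> 8);           // arithmetic shift = sign extension
}

// Matches _mm_unpacklo_epi8(v, _mm_cmpgt_epi8(zero, v)): pairing each byte
// with 0x00 or 0xFF depending on its sign reconstructs the int16 value.
static int16_t widen_by_sign_mask(uint8_t byte) {
  uint8_t sign = (byte & 0x80) ? 0xFF : 0x00;       // cmpgt(0, v), per byte
  return (int16_t) (uint16_t) ((sign << 8) | byte);
}

The ld128 weight path prefers the sign-mask form because one _mm_cmpgt_epi8 feeds both the low and high unpacks of a 16-byte load, halving the widening work per load.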
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-3x4c8-minmax-fp32-sse2-ld64.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-gemm/MRx4c8-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>


void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_3x4c8__sse2_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }

  do {
    __m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
    __m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
    __m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
    __m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
    __m128i vacc1x0 = vacc0x0;
    __m128i vacc1x1 = vacc0x1;
    __m128i vacc1x2 = vacc0x2;
    __m128i vacc1x3 = vacc0x3;
    __m128i vacc2x0 = vacc0x0;
    __m128i vacc2x1 = vacc0x1;
    __m128i vacc2x2 = vacc0x2;
    __m128i vacc2x3 = vacc0x3;
    w = (const int32_t*) w + 4;

    size_t k = 0;
    while (k < kc) {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
      a0 += 8;
      const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
      const __m128i vxa1 = _mm_srai_epi16(_mm_unpacklo_epi8(va1, va1), 8);
      a1 += 8;
      const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
      const __m128i vxa2 = _mm_srai_epi16(_mm_unpacklo_epi8(va2, va2), 8);
      a2 += 8;

      const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
      const __m128i vxb0 = _mm_srai_epi16(_mm_unpacklo_epi8(vb0, vb0), 8);

      vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
      vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0));
      vacc2x0 = _mm_add_epi32(vacc2x0, _mm_madd_epi16(vxa2, vxb0));

      const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
      const __m128i vxb1 = _mm_srai_epi16(_mm_unpacklo_epi8(vb1, vb1), 8);

      vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
      vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1));
      vacc2x1 = _mm_add_epi32(vacc2x1, _mm_madd_epi16(vxa2, vxb1));

      const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
      const __m128i vxb2 = _mm_srai_epi16(_mm_unpacklo_epi8(vb2, vb2), 8);

      vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
      vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2));
      vacc2x2 = _mm_add_epi32(vacc2x2, _mm_madd_epi16(vxa2, vxb2));

      const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
      const __m128i vxb3 = _mm_srai_epi16(_mm_unpacklo_epi8(vb3, vb3), 8);

      vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
      vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3));
      vacc2x3 = _mm_add_epi32(vacc2x3, _mm_madd_epi16(vxa2, vxb3));

      w = (const void*) ((const int8_t*) w + 32);
      k += 8 * sizeof(int8_t);
    }

    const __m128i vacc0x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x0, vacc0x2), _mm_unpackhi_epi32(vacc0x0, vacc0x2));
    const __m128i vacc0x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x1, vacc0x3), _mm_unpackhi_epi32(vacc0x1, vacc0x3));
    const __m128i vacc1x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc1x0, vacc1x2), _mm_unpackhi_epi32(vacc1x0, vacc1x2));
    const __m128i vacc1x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc1x1, vacc1x3), _mm_unpackhi_epi32(vacc1x1, vacc1x3));
    const __m128i vacc2x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc2x0, vacc2x2), _mm_unpackhi_epi32(vacc2x0, vacc2x2));
    const __m128i vacc2x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc2x1, vacc2x3), _mm_unpackhi_epi32(vacc2x1, vacc2x3));

    __m128i vacc0x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x02, vacc0x13), _mm_unpackhi_epi32(vacc0x02, vacc0x13));
    __m128i vacc1x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc1x02, vacc1x13), _mm_unpackhi_epi32(vacc1x02, vacc1x13));
    __m128i vacc2x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc2x02, vacc2x13), _mm_unpackhi_epi32(vacc2x02, vacc2x13));

    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);

    const __m128 vscale0123 = _mm_load_ps((const float*) w);
    w = (const void*) ((const float*) w + 4);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale0123);

    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);

    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);

    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);

    const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
    vacc01x0123 = _mm_max_epi16(vacc01x0123, voutput_min);
    vacc22x0123 = _mm_max_epi16(vacc22x0123, voutput_min);

    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc22x0123);

    if (nc >= 4) {
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      vout = _mm_srli_si128(vout, 4);
      unaligned_store_u32(c1, (uint32_t) _mm_cvtsi128_si32(vout));
      vout = _mm_srli_si128(vout, 4);
      unaligned_store_u32(c2, (uint32_t) _mm_cvtsi128_si32(vout));

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);

      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);

      nc -= 4;
    } else {
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c0 = (int8_t) _mm_cvtsi128_si32(vout);
        *c1 = (int8_t) _mm_extract_epi16(vout, 2);
        *c2 = (int8_t) _mm_extract_epi16(vout, 4);
      }

      nc = 0;
    }
  } while (nc != 0);
}
7,763
40.079365
119
c
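For reference, the arithmetic that this 3x4c8 kernel (and the SSE4.1/XOP variants of it below) implements reduces to a per-channel-scaled int8 GEMM with fp32 requantization. The scalar sketch below is illustrative only and is not part of XNNPACK: the function name and the unpacked row-major layouts are assumptions (the real kernels consume packed weights), and lrintf matches _mm_cvtps_epi32 only under the default round-to-nearest-even mode.

#include <math.h>
#include <stddef.h>
#include <stdint.h>

// Hypothetical scalar reference for the qs8-qc8w (per-channel weights) GEMM.
static void ref_qs8_qc8w_gemm(
    size_t m, size_t n, size_t k,
    const int8_t* a,        // m x k activations, row-major (assumed layout)
    const int8_t* b,        // n x k weights, row-major (assumed layout)
    const int32_t* bias,    // n int32 biases
    const float* scale,     // n per-channel requantization scales
    int8_t zero_point, int8_t out_min, int8_t out_max,
    int8_t* c)              // m x n outputs, row-major
{
  for (size_t i = 0; i < m; i++) {
    for (size_t j = 0; j < n; j++) {
      int32_t acc = bias[j];
      for (size_t p = 0; p < k; p++) {
        acc += (int32_t) a[i * k + p] * (int32_t) b[j * k + p];
      }
      // fp32 requantization: scale, round to nearest-even, add zero point, clamp.
      const float scaled = (float) acc * scale[j];
      long rounded = lrintf(scaled) + (long) zero_point;
      if (rounded < (long) out_min) rounded = (long) out_min;
      if (rounded > (long) out_max) rounded = (long) out_max;
      c[i * n + j] = (int8_t) rounded;
    }
  }
}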
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-3x4c8-minmax-fp32-sse41-ld128.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/MRx4c8-sse.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <smmintrin.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> #include <xnnpack/unaligned.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_3x4c8__sse41_ld128( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 8 * sizeof(int8_t)); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } do { __m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]); __m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]); __m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]); __m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]); __m128i vacc1x0 = vacc0x0; __m128i vacc1x1 = vacc0x1; __m128i vacc1x2 = vacc0x2; __m128i vacc1x3 = vacc0x3; __m128i vacc2x0 = vacc0x0; __m128i vacc2x1 = vacc0x1; __m128i vacc2x2 = vacc0x2; __m128i vacc2x3 = vacc0x3; w = (const int32_t*) w + 4; size_t k = 0; while (k < kc) { const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0); const __m128i vxa0 = _mm_cvtepi8_epi16(va0); a0 += 8; const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1); const __m128i vxa1 = _mm_cvtepi8_epi16(va1); a1 += 8; const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2); const __m128i vxa2 = _mm_cvtepi8_epi16(va2); a2 += 8; const __m128i vb01 = _mm_load_si128((const __m128i*) w); const __m128i vxb0 = _mm_cvtepi8_epi16(vb01); const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8); vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0)); vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1)); vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0)); vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1)); vacc2x0 = _mm_add_epi32(vacc2x0, _mm_madd_epi16(vxa2, vxb0)); vacc2x1 = _mm_add_epi32(vacc2x1, _mm_madd_epi16(vxa2, vxb1)); const __m128i vb23 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 16)); const __m128i vxb2 = _mm_cvtepi8_epi16(vb23); const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8); vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2)); vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3)); vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2)); vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3)); vacc2x2 = _mm_add_epi32(vacc2x2, _mm_madd_epi16(vxa2, vxb2)); vacc2x3 = _mm_add_epi32(vacc2x3, _mm_madd_epi16(vxa2, vxb3)); w = (const void*) ((const int8_t*) w + 32); k += 8 * sizeof(int8_t); } const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1); const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3); const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1); const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, 
vacc1x3); const __m128i vacc2x01 = _mm_hadd_epi32(vacc2x0, vacc2x1); const __m128i vacc2x23 = _mm_hadd_epi32(vacc2x2, vacc2x3); __m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23); __m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23); __m128i vacc2x0123 = _mm_hadd_epi32(vacc2x01, vacc2x23); __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123); __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123); __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123); const __m128 vscale0123 = _mm_load_ps((const float*) w); w = (const void*) ((const float*) w + 4); vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123); vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123); vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale0123); const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point); vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point); vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point); vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point); vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123); vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123); vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123); const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point); __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point); __m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point); __m128i vout = _mm_packs_epi16(vacc01x0123, vacc22x0123); vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min)); if (nc >= 4) { unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout)); unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1)); unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2)); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); nc -= 4; } else { if (nc & 2) { unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0)); c0 += 2; unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2)); c1 += 2; unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4)); c2 += 2; vout = _mm_srli_epi32(vout, 16); } if (nc & 1) { *c0 = (int8_t) _mm_extract_epi8(vout, 0); *c1 = (int8_t) _mm_extract_epi8(vout, 4); *c2 = (int8_t) _mm_extract_epi8(vout, 8); } nc = 0; } } while (nc != 0); }
6,770
36.40884
108
c
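The only difference between this ld128 variant and the ld64 variant that follows is how weights are widened: a single 16-byte load yields two sign-extended 16-bit vectors, instead of two separate 8-byte loads. A minimal sketch of that idiom is below; the helper name is hypothetical, and the kernel itself uses the aligned _mm_load_si128 because its packed weights are 16-byte aligned, whereas the sketch uses the unaligned load to stay self-contained.

#include <smmintrin.h>

// Expand 16 packed int8 weights into two 8-lane int16 vectors (SSE4.1).
static inline void expand_two_s8x8(const int8_t* w, __m128i* lo16, __m128i* hi16) {
  const __m128i vb = _mm_loadu_si128((const __m128i*) w);  // b0..b15
  *lo16 = _mm_cvtepi8_epi16(vb);                           // sign-extend b0..b7
  // Duplicate each high byte into both halves of a 16-bit lane, then
  // arithmetic-shift right by 8 to sign-extend b8..b15.
  *hi16 = _mm_srai_epi16(_mm_unpackhi_epi8(vb, vb), 8);
}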
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-3x4c8-minmax-fp32-sse41-ld64.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/MRx4c8-sse.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <smmintrin.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> #include <xnnpack/unaligned.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_3x4c8__sse41_ld64( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 8 * sizeof(int8_t)); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } do { __m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]); __m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]); __m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]); __m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]); __m128i vacc1x0 = vacc0x0; __m128i vacc1x1 = vacc0x1; __m128i vacc1x2 = vacc0x2; __m128i vacc1x3 = vacc0x3; __m128i vacc2x0 = vacc0x0; __m128i vacc2x1 = vacc0x1; __m128i vacc2x2 = vacc0x2; __m128i vacc2x3 = vacc0x3; w = (const int32_t*) w + 4; size_t k = 0; while (k < kc) { const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0); const __m128i vxa0 = _mm_cvtepi8_epi16(va0); a0 += 8; const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1); const __m128i vxa1 = _mm_cvtepi8_epi16(va1); a1 += 8; const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2); const __m128i vxa2 = _mm_cvtepi8_epi16(va2); a2 += 8; const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb0 = _mm_cvtepi8_epi16(vb0); vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0)); vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0)); vacc2x0 = _mm_add_epi32(vacc2x0, _mm_madd_epi16(vxa2, vxb0)); const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8)); const __m128i vxb1 = _mm_cvtepi8_epi16(vb1); vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1)); vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1)); vacc2x1 = _mm_add_epi32(vacc2x1, _mm_madd_epi16(vxa2, vxb1)); const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16)); const __m128i vxb2 = _mm_cvtepi8_epi16(vb2); vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2)); vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2)); vacc2x2 = _mm_add_epi32(vacc2x2, _mm_madd_epi16(vxa2, vxb2)); const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24)); const __m128i vxb3 = _mm_cvtepi8_epi16(vb3); vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3)); vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3)); vacc2x3 = _mm_add_epi32(vacc2x3, _mm_madd_epi16(vxa2, vxb3)); w = (const void*) ((const int8_t*) w + 32); k += 8 * sizeof(int8_t); } const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1); const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3); const 
__m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1); const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3); const __m128i vacc2x01 = _mm_hadd_epi32(vacc2x0, vacc2x1); const __m128i vacc2x23 = _mm_hadd_epi32(vacc2x2, vacc2x3); __m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23); __m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23); __m128i vacc2x0123 = _mm_hadd_epi32(vacc2x01, vacc2x23); __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123); __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123); __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123); const __m128 vscale0123 = _mm_load_ps((const float*) w); w = (const void*) ((const float*) w + 4); vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123); vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123); vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale0123); const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point); vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point); vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point); vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point); vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123); vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123); vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123); const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point); __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point); __m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point); __m128i vout = _mm_packs_epi16(vacc01x0123, vacc22x0123); vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min)); if (nc >= 4) { unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout)); unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1)); unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2)); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); nc -= 4; } else { if (nc & 2) { unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0)); c0 += 2; unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2)); c1 += 2; unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4)); c2 += 2; vout = _mm_srli_epi32(vout, 16); } if (nc & 1) { *c0 = (int8_t) _mm_extract_epi8(vout, 0); *c1 = (int8_t) _mm_extract_epi8(vout, 4); *c2 = (int8_t) _mm_extract_epi8(vout, 8); } nc = 0; } } while (nc != 0); }
6,888
36.237838
108
c
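Reading the pointer arithmetic shared by these 3x4c8 kernels gives the per-4-column packed-weight layout: four int32 biases up front, then 32 int8 weight bytes per 8-element k step (4 output channels times 8 k values), then four float per-channel scales. The helper below merely restates that accounting; it is a reading of the code above, not an XNNPACK API, and assumes kc has already been rounded up to a multiple of 8 as the kernels require.

#include <stddef.h>
#include <stdint.h>

// Bytes consumed from w per 4-column tile by the 3x4c8 kernels (a sketch).
static size_t packed_weights_tile_size(size_t kc_rounded_to_8) {
  return 4 * sizeof(int32_t)                  // biases, read before the k loop
       + (kc_rounded_to_8 / 8) * 32           // 4 channels x 8 int8 per k step
       + 4 * sizeof(float);                   // fp32 scales, read after the loop
}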
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-3x4c8-minmax-fp32-xop-ld128.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/MRx4c8-sse.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #ifdef _MSC_VER #include <intrin.h> #else #include <x86intrin.h> #endif #include <xnnpack/gemm.h> #include <xnnpack/math.h> #include <xnnpack/unaligned.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_3x4c8__xop_ld128( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 8 * sizeof(int8_t)); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } do { __m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]); __m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]); __m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]); __m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]); __m128i vacc1x0 = vacc0x0; __m128i vacc1x1 = vacc0x1; __m128i vacc1x2 = vacc0x2; __m128i vacc1x3 = vacc0x3; __m128i vacc2x0 = vacc0x0; __m128i vacc2x1 = vacc0x1; __m128i vacc2x2 = vacc0x2; __m128i vacc2x3 = vacc0x3; w = (const int32_t*) w + 4; size_t k = 0; while (k < kc) { const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0); const __m128i vxa0 = _mm_cvtepi8_epi16(va0); a0 += 8; const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1); const __m128i vxa1 = _mm_cvtepi8_epi16(va1); a1 += 8; const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2); const __m128i vxa2 = _mm_cvtepi8_epi16(va2); a2 += 8; const __m128i vb01 = _mm_load_si128((const __m128i*) w); const __m128i vxb0 = _mm_cvtepi8_epi16(vb01); const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8); vacc0x0 = _mm_maddd_epi16(vxa0, vxb0, vacc0x0); vacc0x1 = _mm_maddd_epi16(vxa0, vxb1, vacc0x1); vacc1x0 = _mm_maddd_epi16(vxa1, vxb0, vacc1x0); vacc1x1 = _mm_maddd_epi16(vxa1, vxb1, vacc1x1); vacc2x0 = _mm_maddd_epi16(vxa2, vxb0, vacc2x0); vacc2x1 = _mm_maddd_epi16(vxa2, vxb1, vacc2x1); const __m128i vb23 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 16)); const __m128i vxb2 = _mm_cvtepi8_epi16(vb23); const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8); vacc0x2 = _mm_maddd_epi16(vxa0, vxb2, vacc0x2); vacc0x3 = _mm_maddd_epi16(vxa0, vxb3, vacc0x3); vacc1x2 = _mm_maddd_epi16(vxa1, vxb2, vacc1x2); vacc1x3 = _mm_maddd_epi16(vxa1, vxb3, vacc1x3); vacc2x2 = _mm_maddd_epi16(vxa2, vxb2, vacc2x2); vacc2x3 = _mm_maddd_epi16(vxa2, vxb3, vacc2x3); w = (const void*) ((const int8_t*) w + 32); k += 8 * sizeof(int8_t); } const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1); const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3); const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1); const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3); const __m128i vacc2x01 = _mm_hadd_epi32(vacc2x0, vacc2x1); const __m128i vacc2x23 = _mm_hadd_epi32(vacc2x2, 
vacc2x3); __m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23); __m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23); __m128i vacc2x0123 = _mm_hadd_epi32(vacc2x01, vacc2x23); __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123); __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123); __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123); const __m128 vscale0123 = _mm_load_ps((const float*) w); w = (const void*) ((const float*) w + 4); vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123); vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123); vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale0123); const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point); vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point); vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point); vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point); vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123); vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123); vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123); const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point); __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point); __m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point); __m128i vout = _mm_packs_epi16(vacc01x0123, vacc22x0123); vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min)); if (nc >= 4) { unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout)); unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1)); unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2)); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); nc -= 4; } else { if (nc & 2) { unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0)); c0 += 2; unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2)); c1 += 2; unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4)); c2 += 2; vout = _mm_srli_epi32(vout, 16); } if (nc & 1) { *c0 = (int8_t) _mm_extract_epi8(vout, 0); *c1 = (int8_t) _mm_extract_epi8(vout, 4); *c2 = (int8_t) _mm_extract_epi8(vout, 8); } nc = 0; } } while (nc != 0); }
6,653
34.967568
108
c
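The XOP variants differ from the SSE4.1 ones only in fusing the multiply-accumulate: _mm_maddd_epi16(a, b, acc) replaces the pmaddwd-plus-paddd pair, saving one instruction per accumulator update. A sketch of the equivalent fallback on non-XOP hardware:

#include <emmintrin.h>

// Non-XOP equivalent of _mm_maddd_epi16(a, b, c): multiply adjacent int16
// pairs, horizontally add each pair to int32, then add the accumulator.
static inline __m128i maddd_epi16_fallback(__m128i a, __m128i b, __m128i c) {
  return _mm_add_epi32(_mm_madd_epi16(a, b), c);  // pmaddwd + paddd
}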
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-3x4c8-minmax-fp32-xop-ld64.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/MRx4c8-sse.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #ifdef _MSC_VER #include <intrin.h> #else #include <x86intrin.h> #endif #include <xnnpack/gemm.h> #include <xnnpack/math.h> #include <xnnpack/unaligned.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_3x4c8__xop_ld64( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 8 * sizeof(int8_t)); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } do { __m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]); __m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]); __m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]); __m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]); __m128i vacc1x0 = vacc0x0; __m128i vacc1x1 = vacc0x1; __m128i vacc1x2 = vacc0x2; __m128i vacc1x3 = vacc0x3; __m128i vacc2x0 = vacc0x0; __m128i vacc2x1 = vacc0x1; __m128i vacc2x2 = vacc0x2; __m128i vacc2x3 = vacc0x3; w = (const int32_t*) w + 4; size_t k = 0; while (k < kc) { const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0); const __m128i vxa0 = _mm_cvtepi8_epi16(va0); a0 += 8; const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1); const __m128i vxa1 = _mm_cvtepi8_epi16(va1); a1 += 8; const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2); const __m128i vxa2 = _mm_cvtepi8_epi16(va2); a2 += 8; const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb0 = _mm_cvtepi8_epi16(vb0); vacc0x0 = _mm_maddd_epi16(vxa0, vxb0, vacc0x0); vacc1x0 = _mm_maddd_epi16(vxa1, vxb0, vacc1x0); vacc2x0 = _mm_maddd_epi16(vxa2, vxb0, vacc2x0); const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8)); const __m128i vxb1 = _mm_cvtepi8_epi16(vb1); vacc0x1 = _mm_maddd_epi16(vxa0, vxb1, vacc0x1); vacc1x1 = _mm_maddd_epi16(vxa1, vxb1, vacc1x1); vacc2x1 = _mm_maddd_epi16(vxa2, vxb1, vacc2x1); const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16)); const __m128i vxb2 = _mm_cvtepi8_epi16(vb2); vacc0x2 = _mm_maddd_epi16(vxa0, vxb2, vacc0x2); vacc1x2 = _mm_maddd_epi16(vxa1, vxb2, vacc1x2); vacc2x2 = _mm_maddd_epi16(vxa2, vxb2, vacc2x2); const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24)); const __m128i vxb3 = _mm_cvtepi8_epi16(vb3); vacc0x3 = _mm_maddd_epi16(vxa0, vxb3, vacc0x3); vacc1x3 = _mm_maddd_epi16(vxa1, vxb3, vacc1x3); vacc2x3 = _mm_maddd_epi16(vxa2, vxb3, vacc2x3); w = (const void*) ((const int8_t*) w + 32); k += 8 * sizeof(int8_t); } const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1); const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3); const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1); const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3); const 
__m128i vacc2x01 = _mm_hadd_epi32(vacc2x0, vacc2x1); const __m128i vacc2x23 = _mm_hadd_epi32(vacc2x2, vacc2x3); __m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23); __m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23); __m128i vacc2x0123 = _mm_hadd_epi32(vacc2x01, vacc2x23); __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123); __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123); __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123); const __m128 vscale0123 = _mm_load_ps((const float*) w); w = (const void*) ((const float*) w + 4); vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123); vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123); vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale0123); const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point); vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point); vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point); vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point); vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123); vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123); vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123); const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point); __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point); __m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point); __m128i vout = _mm_packs_epi16(vacc01x0123, vacc22x0123); vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min)); if (nc >= 4) { unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout)); unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1)); unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2)); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); nc -= 4; } else { if (nc & 2) { unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0)); c0 += 2; unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2)); c1 += 2; unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4)); c2 += 2; vout = _mm_srli_epi32(vout, 16); } if (nc & 1) { *c0 = (int8_t) _mm_extract_epi8(vout, 0); *c1 = (int8_t) _mm_extract_epi8(vout, 4); *c2 = (int8_t) _mm_extract_epi8(vout, 8); } nc = 0; } } while (nc != 0); }
6,771
34.830688
108
c
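All of the c8 kernels above round kc up to a multiple of 8 and then consume whole 8-byte groups; the XNN_OOB_READS annotation marks the resulting deliberate over-read past the logical end of the activation rows. A sketch of what the rounding helper computes, assuming q is a power of two (as the _po2 suffix requires):

#include <stddef.h>

// Round n up to the next multiple of q, where q must be a power of two.
static inline size_t round_up_po2_sketch(size_t n, size_t q) {
  return (n + q - 1) & ~(q - 1);  // e.g. round_up_po2_sketch(13, 8) == 16
}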
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-3x8-minmax-fp32-neon-mlal-lane-prfm.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/neon-mlal-lane.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/common.h> #include <xnnpack/gemm.h> #include <xnnpack/prefetch.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_3x8__neon_mlal_lane_prfm( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } do { int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4); int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4); int32x4_t vacc1x0123 = vacc0x0123; int32x4_t vacc1x4567 = vacc0x4567; int32x4_t vacc2x0123 = vacc0x0123; int32x4_t vacc2x4567 = vacc0x4567; size_t k = kc; while (k >= 8 * sizeof(int8_t)) { const int8x8_t va0 = vld1_s8(a0); a0 += 8; const int16x8_t vxa0 = vmovl_s8(va0); const int8x8_t va1 = vld1_s8(a1); a1 += 8; const int16x8_t vxa1 = vmovl_s8(va1); const int8x8_t va2 = vld1_s8(a2); a2 += 8; const int16x8_t vxa2 = vmovl_s8(va2); const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa1), 0); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa1), 0); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa2), 0); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa2), 0); const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa1), 1); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa1), 1); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa2), 1); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa2), 1); const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2); vacc0x4567 = vmlal_lane_s16(vacc0x4567, 
vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa1), 2); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa1), 2); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa2), 2); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa2), 2); const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa1), 3); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa1), 3); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa2), 3); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa2), 3); xnn_prefetch_to_l1((const int8_t*) w + 448); const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa1), 0); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa1), 0); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa2), 0); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa2), 0); const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa1), 1); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa1), 1); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa2), 1); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa2), 1); const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa1), 2); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa1), 2); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa2), 2); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa2), 2); const int8x8_t vb01234567c7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c7 = vmovl_s8(vb01234567c7); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa0), 3); vacc0x4567 = 
vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa0), 3); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa1), 3); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa1), 3); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa2), 3); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa2), 3); k -= 8 * sizeof(int8_t); } if XNN_UNLIKELY(k != 0) { const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k); const int16x8_t vxa0 = vmovl_s8(va0); const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k); const int16x8_t vxa1 = vmovl_s8(va1); const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k); const int16x8_t vxa2 = vmovl_s8(va2); const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa1), 0); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa1), 0); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa2), 0); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa2), 0); if (k >= 2 * sizeof(int8_t)) { const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa1), 1); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa1), 1); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa2), 1); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa2), 1); if (k > 2 * sizeof(int8_t)) { const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa1), 2); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa1), 2); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa2), 2); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa2), 2); if (k >= 4 * sizeof(int8_t)) { const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa1), 3); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c3), 
vget_low_s16(vxa1), 3); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa2), 3); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa2), 3); if (k > 4 * sizeof(int8_t)) { const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa1), 0); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa1), 0); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa2), 0); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa2), 0); if (k >= 6 * sizeof(int8_t)) { const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa1), 1); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa1), 1); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa2), 1); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa2), 1); if (k > 6 * sizeof(int8_t)) { const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa1), 2); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa1), 2); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa2), 2); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa2), 2); } } } } } } } float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123); float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567); float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123); float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567); float32x4_t vfpacc2x0123 = vcvtq_f32_s32(vacc2x0123); float32x4_t vfpacc2x4567 = vcvtq_f32_s32(vacc2x4567); const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4); vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123); vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale0123); vfpacc2x0123 = vmulq_f32(vfpacc2x0123, vscale0123); const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4); vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567); vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale4567); vfpacc2x4567 = vmulq_f32(vfpacc2x4567, vscale4567); const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias); vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias)); vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias)); vacc1x0123 = 
vreinterpretq_s32_f32(vaddq_f32(vfpacc1x0123, vmagic_bias)); vacc1x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc1x4567, vmagic_bias)); vacc2x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc2x0123, vmagic_bias)); vacc2x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc2x4567, vmagic_bias)); const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point); vacc0x0123 = vqsubq_s32(vacc0x0123, vmagic_bias_less_output_zero_point); vacc0x4567 = vqsubq_s32(vacc0x4567, vmagic_bias_less_output_zero_point); vacc1x0123 = vqsubq_s32(vacc1x0123, vmagic_bias_less_output_zero_point); vacc1x4567 = vqsubq_s32(vacc1x4567, vmagic_bias_less_output_zero_point); vacc2x0123 = vqsubq_s32(vacc2x0123, vmagic_bias_less_output_zero_point); vacc2x4567 = vqsubq_s32(vacc2x4567, vmagic_bias_less_output_zero_point); #if XNN_ARCH_ARM64 int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567); int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567); int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567); int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567); int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567); #else int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)); int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)); int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)); int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567)); int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567); #endif const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neon.output_min); vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min); vout2x01234567 = vmax_s8(vout2x01234567, vget_low_s8(voutput_min)); const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neon.output_max); vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max); vout2x01234567 = vmin_s8(vout2x01234567, vget_low_s8(voutput_max)); if (nc >= 8) { vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567)); vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567)); vst1_s8(c2 + 0, vout2x01234567); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); nc -= 8; } else { if (nc & 4) { vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4; vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4; vst1_lane_u32((void*) c2, vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4; vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4); vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4); } if (nc & 2) { vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2; vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2; vst1_lane_u16((void*) c2, vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2; vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2); vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2); } if (nc & 1) { vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0); vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8); 
vst1_lane_s8(c2, vout2x01234567, 0); } nc = 0; } } while (nc != 0); }
19,629
57.076923
125
c
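The only difference between this prfm variant and the plain mlal-lane kernel that follows is the xnn_prefetch_to_l1((const int8_t*) w + 448) hint inside the main k loop, pulling upcoming packed-weight cache lines toward L1 several iterations ahead of their loads. A hedged stand-in for that helper on GCC/Clang is below; the actual XNNPACK helper also handles other compilers, which this sketch does not.

// Read-prefetch sketch for GCC/Clang; __builtin_prefetch is a compiler
// builtin, so no include is needed.
static inline void prefetch_to_l1_sketch(const void* address) {
  __builtin_prefetch(address);
}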
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-3x8-minmax-fp32-neon-mlal-lane.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/neon-mlal-lane.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/common.h> #include <xnnpack/gemm.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_3x8__neon_mlal_lane( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } do { int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4); int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4); int32x4_t vacc1x0123 = vacc0x0123; int32x4_t vacc1x4567 = vacc0x4567; int32x4_t vacc2x0123 = vacc0x0123; int32x4_t vacc2x4567 = vacc0x4567; size_t k = kc; while (k >= 8 * sizeof(int8_t)) { const int8x8_t va0 = vld1_s8(a0); a0 += 8; const int16x8_t vxa0 = vmovl_s8(va0); const int8x8_t va1 = vld1_s8(a1); a1 += 8; const int16x8_t vxa1 = vmovl_s8(va1); const int8x8_t va2 = vld1_s8(a2); a2 += 8; const int16x8_t vxa2 = vmovl_s8(va2); const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa1), 0); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa1), 0); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa2), 0); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa2), 0); const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa1), 1); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa1), 1); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa2), 1); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa2), 1); const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2); 
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa1), 2); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa1), 2); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa2), 2); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa2), 2); const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa1), 3); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa1), 3); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa2), 3); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa2), 3); const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa1), 0); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa1), 0); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa2), 0); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa2), 0); const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa1), 1); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa1), 1); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa2), 1); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa2), 1); const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa1), 2); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa1), 2); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa2), 2); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa2), 2); const int8x8_t vb01234567c7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c7 = vmovl_s8(vb01234567c7); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa0), 3); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa0), 3); vacc1x0123 = 
vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa1), 3); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa1), 3); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa2), 3); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa2), 3); k -= 8 * sizeof(int8_t); } if XNN_UNLIKELY(k != 0) { const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k); const int16x8_t vxa0 = vmovl_s8(va0); const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k); const int16x8_t vxa1 = vmovl_s8(va1); const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k); const int16x8_t vxa2 = vmovl_s8(va2); const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa1), 0); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa1), 0); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa2), 0); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa2), 0); if (k >= 2 * sizeof(int8_t)) { const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa1), 1); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa1), 1); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa2), 1); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa2), 1); if (k > 2 * sizeof(int8_t)) { const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa1), 2); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa1), 2); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa2), 2); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa2), 2); if (k >= 4 * sizeof(int8_t)) { const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa1), 3); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa1), 3); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c3), 
vget_low_s16(vxa2), 3); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa2), 3); if (k > 4 * sizeof(int8_t)) { const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa1), 0); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa1), 0); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa2), 0); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa2), 0); if (k >= 6 * sizeof(int8_t)) { const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa1), 1); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa1), 1); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa2), 1); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa2), 1); if (k > 6 * sizeof(int8_t)) { const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa1), 2); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa1), 2); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa2), 2); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa2), 2); } } } } } } } float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123); float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567); float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123); float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567); float32x4_t vfpacc2x0123 = vcvtq_f32_s32(vacc2x0123); float32x4_t vfpacc2x4567 = vcvtq_f32_s32(vacc2x4567); const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4); vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123); vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale0123); vfpacc2x0123 = vmulq_f32(vfpacc2x0123, vscale0123); const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4); vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567); vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale4567); vfpacc2x4567 = vmulq_f32(vfpacc2x4567, vscale4567); const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias); vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias)); vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias)); vacc1x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc1x0123, vmagic_bias)); vacc1x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc1x4567, 
vmagic_bias)); vacc2x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc2x0123, vmagic_bias)); vacc2x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc2x4567, vmagic_bias)); const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point); vacc0x0123 = vqsubq_s32(vacc0x0123, vmagic_bias_less_output_zero_point); vacc0x4567 = vqsubq_s32(vacc0x4567, vmagic_bias_less_output_zero_point); vacc1x0123 = vqsubq_s32(vacc1x0123, vmagic_bias_less_output_zero_point); vacc1x4567 = vqsubq_s32(vacc1x4567, vmagic_bias_less_output_zero_point); vacc2x0123 = vqsubq_s32(vacc2x0123, vmagic_bias_less_output_zero_point); vacc2x4567 = vqsubq_s32(vacc2x4567, vmagic_bias_less_output_zero_point); #if XNN_ARCH_ARM64 int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567); int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567); int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567); int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567); int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567); #else int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)); int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)); int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)); int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567)); int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567); #endif const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neon.output_min); vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min); vout2x01234567 = vmax_s8(vout2x01234567, vget_low_s8(voutput_min)); const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neon.output_max); vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max); vout2x01234567 = vmin_s8(vout2x01234567, vget_low_s8(voutput_max)); if (nc >= 8) { vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567)); vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567)); vst1_s8(c2 + 0, vout2x01234567); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); nc -= 8; } else { if (nc & 4) { vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4; vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4; vst1_lane_u32((void*) c2, vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4; vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4); vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4); } if (nc & 2) { vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2; vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2; vst1_lane_u16((void*) c2, vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2; vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2); vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2); } if (nc & 1) { vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0); vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8); vst1_lane_s8(c2, vout2x01234567, 0); } nc = 0; } } while (nc != 0); }
19,543
57.166667
125
c
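The record above ends with the fp32_neon "magic bias" epilogue: the scaled float accumulator is added to a large constant so that the rounded integer lands in the mantissa, the float is reinterpreted as int32, and a precomputed bias-minus-zero-point is subtracted with saturation. A scalar model of that trick follows; it assumes XNNPACK's usual constants (magic_bias = 0x1.8p+23f, whose bit pattern is 0x4B400000), which are not shown in this record, so treat the values as an illustrative assumption rather than the kernel's code.

// Scalar sketch of the magic-bias float->int8 requantization used above.
// ASSUMPTION: magic_bias = 12582912.0f (0x1.8p+23f), bit pattern 0x4B400000,
// and scaled_acc stays within roughly [-2^22, 2^22) so the trick is exact.
#include <stdint.h>
#include <string.h>

static inline int8_t magic_bias_requant(float scaled_acc, int32_t output_zero_point) {
  const float vmagic_bias = 12582912.0f;  // 0x1.8p+23f
  const int32_t vmagic_bias_less_output_zero_point =
      INT32_C(0x4B400000) - output_zero_point;    // bits(vmagic_bias) - zero point
  const float biased = scaled_acc + vmagic_bias;  // mantissa now holds round(scaled_acc)
  int32_t bits;
  memcpy(&bits, &biased, sizeof(bits));           // like vreinterpretq_s32_f32
  int32_t v = bits - vmagic_bias_less_output_zero_point;  // kernel uses vqsubq_s32 (saturating)
  // The kernel then narrows with saturation and clamps to [output_min, output_max]:
  if (v < INT8_MIN) v = INT8_MIN;
  if (v > INT8_MAX) v = INT8_MAX;
  return (int8_t) v;
}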
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-3x8-minmax-fp32-neonv8-mlal-lane-prfm.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/neon-mlal-lane.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/common.h> #include <xnnpack/gemm.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/prefetch.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_3x8__neonv8_mlal_lane_prfm( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } do { int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4); int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4); int32x4_t vacc1x0123 = vacc0x0123; int32x4_t vacc1x4567 = vacc0x4567; int32x4_t vacc2x0123 = vacc0x0123; int32x4_t vacc2x4567 = vacc0x4567; size_t k = kc; while (k >= 8 * sizeof(int8_t)) { const int8x8_t va0 = vld1_s8(a0); a0 += 8; const int16x8_t vxa0 = vmovl_s8(va0); const int8x8_t va1 = vld1_s8(a1); a1 += 8; const int16x8_t vxa1 = vmovl_s8(va1); const int8x8_t va2 = vld1_s8(a2); a2 += 8; const int16x8_t vxa2 = vmovl_s8(va2); const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa1), 0); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa1), 0); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa2), 0); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa2), 0); const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa1), 1); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa1), 1); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa2), 1); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa2), 1); const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2); vacc0x4567 = 
vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa1), 2); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa1), 2); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa2), 2); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa2), 2); const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa1), 3); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa1), 3); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa2), 3); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa2), 3); xnn_prefetch_to_l1((const int8_t*) w + 448); const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa1), 0); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa1), 0); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa2), 0); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa2), 0); const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa1), 1); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa1), 1); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa2), 1); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa2), 1); const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa1), 2); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa1), 2); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa2), 2); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa2), 2); const int8x8_t vb01234567c7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c7 = vmovl_s8(vb01234567c7); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c7), 
vget_high_s16(vxa0), 3); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa0), 3); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa1), 3); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa1), 3); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa2), 3); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa2), 3); k -= 8 * sizeof(int8_t); } if XNN_UNLIKELY(k != 0) { const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k); const int16x8_t vxa0 = vmovl_s8(va0); const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k); const int16x8_t vxa1 = vmovl_s8(va1); const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k); const int16x8_t vxa2 = vmovl_s8(va2); const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa1), 0); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa1), 0); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa2), 0); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa2), 0); if (k >= 2 * sizeof(int8_t)) { const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa1), 1); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa1), 1); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa2), 1); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa2), 1); if (k > 2 * sizeof(int8_t)) { const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa1), 2); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa1), 2); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa2), 2); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa2), 2); if (k >= 4 * sizeof(int8_t)) { const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa1), 3); vacc1x4567 = vmlal_lane_s16(vacc1x4567, 
vget_high_s16(vxb01234567c3), vget_low_s16(vxa1), 3); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa2), 3); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa2), 3); if (k > 4 * sizeof(int8_t)) { const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa1), 0); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa1), 0); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa2), 0); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa2), 0); if (k >= 6 * sizeof(int8_t)) { const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa1), 1); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa1), 1); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa2), 1); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa2), 1); if (k > 6 * sizeof(int8_t)) { const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa1), 2); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa1), 2); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa2), 2); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa2), 2); } } } } } } } float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123); float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567); float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123); float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567); float32x4_t vfpacc2x0123 = vcvtq_f32_s32(vacc2x0123); float32x4_t vfpacc2x4567 = vcvtq_f32_s32(vacc2x4567); const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4); vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123); vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale0123); vfpacc2x0123 = vmulq_f32(vfpacc2x0123, vscale0123); const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4); vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567); vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale4567); vfpacc2x4567 = vmulq_f32(vfpacc2x4567, vscale4567); vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123); vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567); vacc1x0123 = vcvtnq_s32_f32(vfpacc1x0123); vacc1x4567 = vcvtnq_s32_f32(vfpacc1x4567); vacc2x0123 = vcvtnq_s32_f32(vfpacc2x0123); vacc2x4567 = 
vcvtnq_s32_f32(vfpacc2x4567); const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point); #if XNN_ARCH_ARM64 int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567); int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567); int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567); vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point); vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point); vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point); int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567); int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567); #else int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)); int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)); int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)); vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point); vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point); vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point); int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567)); int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567); #endif const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neonv8.output_min); vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min); vout2x01234567 = vmax_s8(vout2x01234567, vget_low_s8(voutput_min)); const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neonv8.output_max); vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max); vout2x01234567 = vmin_s8(vout2x01234567, vget_low_s8(voutput_max)); if (nc >= 8) { vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567)); vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567)); vst1_s8(c2 + 0, vout2x01234567); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); nc -= 8; } else { if (nc & 4) { vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4; vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4; vst1_lane_u32((void*) c2, vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4; vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4); vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4); } if (nc & 2) { vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2; vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2; vst1_lane_u16((void*) c2, vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2; vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2); vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2); } if (nc & 1) { vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0); vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8); vst1_lane_s8(c2, vout2x01234567, 0); } nc = 0; } } while (nc != 0); }
19,329
56.35905
112
c
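The prfm variant above differs from the plain mlal-lane kernel that follows only by the xnn_prefetch_to_l1((const int8_t*) w + 448) hint in the main loop; the arithmetic and the packed-weight walk are the same. That walk, visible from the pointer arithmetic, is: 8 int32 biases, then kc rows of 8 int8 weights, then 8 per-channel float scales per group of 8 output channels. A scalar model of one row and one group is sketched below; it is illustrative only (it ignores the kernel's kc-remainder ladder and any packing padding) and is not XNNPACK's packing code.

// Scalar model of one packed 8-output-channel group as the kernel consumes it.
#include <stddef.h>
#include <stdint.h>

static const void* ref_one_group(const int8_t* a, size_t kc,
                                 const void* w, int32_t acc[8]) {
  const int32_t* bias = (const int32_t*) w;
  for (int n = 0; n < 8; n++) {
    acc[n] = bias[n];                              // vacc0x0123 / vacc0x4567 init
  }
  const int8_t* wk = (const int8_t*) (bias + 8);   // kc rows of 8 int8 weights
  for (size_t k = 0; k < kc; k++) {
    for (int n = 0; n < 8; n++) {
      acc[n] += (int32_t) a[k] * (int32_t) wk[n];  // the widening MAC done by vmlal_lane_s16
    }
    wk += 8;
  }
  return (const void*) wk;  // next in the stream: 8 float per-channel scales
}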
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-3x8-minmax-fp32-neonv8-mlal-lane.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/neon-mlal-lane.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/common.h> #include <xnnpack/gemm.h> #include <xnnpack/intrinsics-polyfill.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_3x8__neonv8_mlal_lane( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } do { int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4); int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4); int32x4_t vacc1x0123 = vacc0x0123; int32x4_t vacc1x4567 = vacc0x4567; int32x4_t vacc2x0123 = vacc0x0123; int32x4_t vacc2x4567 = vacc0x4567; size_t k = kc; while (k >= 8 * sizeof(int8_t)) { const int8x8_t va0 = vld1_s8(a0); a0 += 8; const int16x8_t vxa0 = vmovl_s8(va0); const int8x8_t va1 = vld1_s8(a1); a1 += 8; const int16x8_t vxa1 = vmovl_s8(va1); const int8x8_t va2 = vld1_s8(a2); a2 += 8; const int16x8_t vxa2 = vmovl_s8(va2); const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa1), 0); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa1), 0); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa2), 0); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa2), 0); const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa1), 1); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa1), 1); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa2), 1); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa2), 1); const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2); vacc0x4567 = vmlal_lane_s16(vacc0x4567, 
vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa1), 2); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa1), 2); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa2), 2); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa2), 2); const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa1), 3); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa1), 3); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa2), 3); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa2), 3); const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa1), 0); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa1), 0); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa2), 0); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa2), 0); const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa1), 1); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa1), 1); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa2), 1); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa2), 1); const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa1), 2); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa1), 2); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa2), 2); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa2), 2); const int8x8_t vb01234567c7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c7 = vmovl_s8(vb01234567c7); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa0), 3); vacc0x4567 = vmlal_lane_s16(vacc0x4567, 
vget_high_s16(vxb01234567c7), vget_high_s16(vxa0), 3); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa1), 3); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa1), 3); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa2), 3); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa2), 3); k -= 8 * sizeof(int8_t); } if XNN_UNLIKELY(k != 0) { const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k); const int16x8_t vxa0 = vmovl_s8(va0); const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k); const int16x8_t vxa1 = vmovl_s8(va1); const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k); const int16x8_t vxa2 = vmovl_s8(va2); const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa1), 0); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa1), 0); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa2), 0); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa2), 0); if (k >= 2 * sizeof(int8_t)) { const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa1), 1); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa1), 1); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa2), 1); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa2), 1); if (k > 2 * sizeof(int8_t)) { const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa1), 2); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa1), 2); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa2), 2); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa2), 2); if (k >= 4 * sizeof(int8_t)) { const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa1), 3); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa1), 3); vacc2x0123 
= vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa2), 3); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa2), 3); if (k > 4 * sizeof(int8_t)) { const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa1), 0); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa1), 0); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa2), 0); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa2), 0); if (k >= 6 * sizeof(int8_t)) { const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa1), 1); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa1), 1); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa2), 1); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa2), 1); if (k > 6 * sizeof(int8_t)) { const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa1), 2); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa1), 2); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa2), 2); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa2), 2); } } } } } } } float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123); float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567); float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123); float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567); float32x4_t vfpacc2x0123 = vcvtq_f32_s32(vacc2x0123); float32x4_t vfpacc2x4567 = vcvtq_f32_s32(vacc2x4567); const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4); vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123); vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale0123); vfpacc2x0123 = vmulq_f32(vfpacc2x0123, vscale0123); const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4); vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567); vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale4567); vfpacc2x4567 = vmulq_f32(vfpacc2x4567, vscale4567); vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123); vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567); vacc1x0123 = vcvtnq_s32_f32(vfpacc1x0123); vacc1x4567 = vcvtnq_s32_f32(vfpacc1x4567); vacc2x0123 = vcvtnq_s32_f32(vfpacc2x0123); vacc2x4567 = vcvtnq_s32_f32(vfpacc2x4567); const int16x8_t voutput_zero_point = 
vld1q_dup_s16(&params->fp32_neonv8.output_zero_point); #if XNN_ARCH_ARM64 int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567); int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567); int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567); vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point); vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point); vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point); int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567); int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567); #else int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)); int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)); int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)); vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point); vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point); vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point); int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567)); int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567); #endif const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neonv8.output_min); vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min); vout2x01234567 = vmax_s8(vout2x01234567, vget_low_s8(voutput_min)); const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neonv8.output_max); vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max); vout2x01234567 = vmin_s8(vout2x01234567, vget_low_s8(voutput_max)); if (nc >= 8) { vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567)); vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567)); vst1_s8(c2 + 0, vout2x01234567); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); nc -= 8; } else { if (nc & 4) { vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4; vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4; vst1_lane_u32((void*) c2, vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4; vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4); vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4); } if (nc & 2) { vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2; vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2; vst1_lane_u16((void*) c2, vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2; vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2); vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2); } if (nc & 1) { vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0); vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8); vst1_lane_s8(c2, vout2x01234567, 0); } nc = 0; } } while (nc != 0); }
19,243
56.444776
112
c
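This variant drops the prefetch but keeps the same fp32/neonv8 requantization epilogue: convert the int32 accumulators to float, multiply by per-channel scales, round with vcvtnq_s32_f32 (round-to-nearest-even), add the output zero point with saturation, narrow twice with saturation, then clamp. A scalar model of that chain (illustrative; assumes FCVTNS-style nearest-even rounding and that the scaled value fits in a long long):

#include <math.h>
#include <stdint.h>

static inline int8_t requant_fp32_neonv8(int32_t acc, float scale, int16_t zero_point,
                                         int8_t output_min, int8_t output_max) {
  const float scaled = (float) acc * scale;  // vmulq_f32 with the per-channel scale
  long long r = llrintf(scaled);             // vcvtnq_s32_f32: round to nearest, ties to even
  if (r < INT32_MIN) r = INT32_MIN;          // FCVTNS saturates to the int32 range
  if (r > INT32_MAX) r = INT32_MAX;
  int32_t v = (int32_t) r;
  if (v < INT16_MIN) v = INT16_MIN;          // vqmovn_s32: saturating narrow to int16
  if (v > INT16_MAX) v = INT16_MAX;
  int32_t s = v + zero_point;                // vqaddq_s16: saturating zero-point add
  if (s < INT16_MIN) s = INT16_MIN;
  if (s > INT16_MAX) s = INT16_MAX;
  if (s < INT8_MIN) s = INT8_MIN;            // vqmovn_s16: saturating narrow to int8
  if (s > INT8_MAX) s = INT8_MAX;
  if (s < output_min) s = output_min;        // vmaxq_s8
  if (s > output_max) s = output_max;        // vminq_s8
  return (int8_t) s;
}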
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-3x8c8-minmax-fp32-avx2.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/MRx8c8-avx2.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <immintrin.h> #include <xnnpack/gemm.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/math.h> #include <xnnpack/unaligned.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_3x8c8__avx2( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 8 * sizeof(int8_t)); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } do { const __m128i vbias0x0 = _mm_cvtsi32_si128(((const int*) w)[0]); const __m128i vbias0x1 = _mm_cvtsi32_si128(((const int*) w)[1]); __m256i vacc0x01 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x0), vbias0x1, 1); const __m128i vbias0x2 = _mm_cvtsi32_si128(((const int*) w)[2]); const __m128i vbias0x3 = _mm_cvtsi32_si128(((const int*) w)[3]); __m256i vacc0x23 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x2), vbias0x3, 1); const __m128i vbias0x4 = _mm_cvtsi32_si128(((const int*) w)[4]); const __m128i vbias0x5 = _mm_cvtsi32_si128(((const int*) w)[5]); __m256i vacc0x45 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x4), vbias0x5, 1); const __m128i vbias0x6 = _mm_cvtsi32_si128(((const int*) w)[6]); const __m128i vbias0x7 = _mm_cvtsi32_si128(((const int*) w)[7]); __m256i vacc0x67 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x6), vbias0x7, 1); __m256i vacc1x01 = vacc0x01; __m256i vacc1x23 = vacc0x23; __m256i vacc1x45 = vacc0x45; __m256i vacc1x67 = vacc0x67; __m256i vacc2x01 = vacc0x01; __m256i vacc2x23 = vacc0x23; __m256i vacc2x45 = vacc0x45; __m256i vacc2x67 = vacc0x67; w = (const int32_t*) w + 8; size_t k = 0; while (k < kc) { const __m128i va0 = _mm_broadcastq_epi64(_mm_loadl_epi64((const __m128i*) a0)); const __m256i vxa0 = _mm256_cvtepi8_epi16(va0); a0 += 8; const __m128i va1 = _mm_broadcastq_epi64(_mm_loadl_epi64((const __m128i*) a1)); const __m256i vxa1 = _mm256_cvtepi8_epi16(va1); a1 += 8; const __m128i va2 = _mm_broadcastq_epi64(_mm_loadl_epi64((const __m128i*) a2)); const __m256i vxa2 = _mm256_cvtepi8_epi16(va2); a2 += 8; const __m128i vb01 = _mm_load_si128((const __m128i*) w); const __m256i vxb01 = _mm256_cvtepi8_epi16(vb01); vacc0x01 = _mm256_add_epi32(vacc0x01, _mm256_madd_epi16(vxa0, vxb01)); vacc1x01 = _mm256_add_epi32(vacc1x01, _mm256_madd_epi16(vxa1, vxb01)); vacc2x01 = _mm256_add_epi32(vacc2x01, _mm256_madd_epi16(vxa2, vxb01)); const __m128i vb23 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 16)); const __m256i vxb23 = _mm256_cvtepi8_epi16(vb23); vacc0x23 = _mm256_add_epi32(vacc0x23, _mm256_madd_epi16(vxa0, vxb23)); vacc1x23 = _mm256_add_epi32(vacc1x23, _mm256_madd_epi16(vxa1, vxb23)); vacc2x23 = _mm256_add_epi32(vacc2x23, 
_mm256_madd_epi16(vxa2, vxb23)); const __m128i vb45 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 32)); const __m256i vxb45 = _mm256_cvtepi8_epi16(vb45); vacc0x45 = _mm256_add_epi32(vacc0x45, _mm256_madd_epi16(vxa0, vxb45)); vacc1x45 = _mm256_add_epi32(vacc1x45, _mm256_madd_epi16(vxa1, vxb45)); vacc2x45 = _mm256_add_epi32(vacc2x45, _mm256_madd_epi16(vxa2, vxb45)); const __m128i vb67 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 48)); const __m256i vxb67 = _mm256_cvtepi8_epi16(vb67); vacc0x67 = _mm256_add_epi32(vacc0x67, _mm256_madd_epi16(vxa0, vxb67)); vacc1x67 = _mm256_add_epi32(vacc1x67, _mm256_madd_epi16(vxa1, vxb67)); vacc2x67 = _mm256_add_epi32(vacc2x67, _mm256_madd_epi16(vxa2, vxb67)); w = (const void*) ((const int8_t*) w + 64); k += 8 * sizeof(int8_t); } const __m256i vacc0x0213 = _mm256_hadd_epi32(vacc0x01, vacc0x23); const __m256i vacc0x4657 = _mm256_hadd_epi32(vacc0x45, vacc0x67); const __m256i vacc1x0213 = _mm256_hadd_epi32(vacc1x01, vacc1x23); const __m256i vacc1x4657 = _mm256_hadd_epi32(vacc1x45, vacc1x67); const __m256i vacc2x0213 = _mm256_hadd_epi32(vacc2x01, vacc2x23); const __m256i vacc2x4657 = _mm256_hadd_epi32(vacc2x45, vacc2x67); const __m256i vacc0x02461357 = _mm256_hadd_epi32(vacc0x0213, vacc0x4657); const __m256i vacc1x02461357 = _mm256_hadd_epi32(vacc1x0213, vacc1x4657); const __m256i vacc2x02461357 = _mm256_hadd_epi32(vacc2x0213, vacc2x4657); const __m256i vpermute_mask = _mm256_set_epi32(7, 3, 6, 2, 5, 1, 4, 0); __m256i vacc0x01234567 = _mm256_permutevar8x32_epi32(vacc0x02461357, vpermute_mask); __m256i vacc1x01234567 = _mm256_permutevar8x32_epi32(vacc1x02461357, vpermute_mask); __m256i vacc2x01234567 = _mm256_permutevar8x32_epi32(vacc2x02461357, vpermute_mask); __m256 vscaled0x01234567 = _mm256_cvtepi32_ps(vacc0x01234567); __m256 vscaled1x01234567 = _mm256_cvtepi32_ps(vacc1x01234567); __m256 vscaled2x01234567 = _mm256_cvtepi32_ps(vacc2x01234567); const __m256 vscale01234567 = _mm256_load_ps(w); w = (const void*) ((const float*) w + 8); vscaled0x01234567 = _mm256_mul_ps(vscaled0x01234567, vscale01234567); vscaled1x01234567 = _mm256_mul_ps(vscaled1x01234567, vscale01234567); vscaled2x01234567 = _mm256_mul_ps(vscaled2x01234567, vscale01234567); const __m256 voutput_max_less_zero_point = _mm256_load_ps(params->fp32_avx2.output_max_less_zero_point); vscaled0x01234567 = _mm256_min_ps(vscaled0x01234567, voutput_max_less_zero_point); vscaled1x01234567 = _mm256_min_ps(vscaled1x01234567, voutput_max_less_zero_point); vscaled2x01234567 = _mm256_min_ps(vscaled2x01234567, voutput_max_less_zero_point); vacc0x01234567 = _mm256_cvtps_epi32(vscaled0x01234567); vacc1x01234567 = _mm256_cvtps_epi32(vscaled1x01234567); vacc2x01234567 = _mm256_cvtps_epi32(vscaled2x01234567); const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->fp32_avx2.output_zero_point); __m256i vacc01x01234567 = _mm256_adds_epi16(_mm256_packs_epi32(vacc0x01234567, vacc1x01234567), voutput_zero_point); __m256i vacc22x01234567 = _mm256_adds_epi16(_mm256_packs_epi32(vacc2x01234567, vacc2x01234567), voutput_zero_point); vacc01x01234567 = _mm256_permute4x64_epi64(vacc01x01234567, _MM_SHUFFLE(3, 1, 2, 0)); vacc22x01234567 = _mm256_permute4x64_epi64(vacc22x01234567, _MM_SHUFFLE(3, 1, 2, 0)); __m256i vout = _mm256_packs_epi16(vacc01x01234567, vacc22x01234567); vout = _mm256_max_epi8(vout, _mm256_load_si256((const __m256i*) params->fp32_avx2.output_min)); __m128i vout_lo = _mm256_castsi256_si128(vout); __m128i vout_hi = _mm256_extracti128_si256(vout, 1); if (nc >= 8) { 
_mm_storel_epi64((__m128i*) c0, vout_lo); _mm_storel_epi64((__m128i*) c1, vout_hi); _mm_storeh_pi((__m64*) c2, _mm_castsi128_ps(vout_lo)); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); nc -= 8; } else { if (nc & 4) { _mm_storeu_si32(c0, vout_lo); _mm_storeu_si32(c1, vout_hi); unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout_lo, 2)); c0 += 4; c1 += 4; c2 += 4; vout_lo = _mm_srli_epi64(vout_lo, 32); vout_hi = _mm_srli_epi64(vout_hi, 32); } if (nc & 2) { unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout_lo, 0)); unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout_hi, 0)); unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout_lo, 4)); c0 += 2; c1 += 2; c2 += 2; vout_lo = _mm_srli_epi32(vout_lo, 16); vout_hi = _mm_srli_epi32(vout_hi, 16); } if (nc & 1) { *c0 = (int8_t) _mm_extract_epi8(vout_lo, 0); *c1 = (int8_t) _mm_extract_epi8(vout_hi, 0); *c2 = (int8_t) _mm_extract_epi8(vout_lo, 8); } nc = 0; } } while (nc != 0); }
9,099
40.552511
120
c
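In the AVX2 kernel above, each channel's four partial dwords are reduced with two rounds of _mm256_hadd_epi32, which operate within 128-bit lanes and leave the eight sums ordered [c0 c2 c4 c6 | c1 c3 c5 c7]; the _mm256_permutevar8x32_epi32 with mask _mm256_set_epi32(7, 3, 6, 2, 5, 1, 4, 0) (i.e., gather indices 0,4,1,5,2,6,3,7 from low to high) restores c0..c7. A tiny scalar demonstration of that final gather:

// Scalar model of the cross-lane permute that undoes the hadd interleave.
#include <stdio.h>

int main(void) {
  const int src[8] = {0, 2, 4, 6, 1, 3, 5, 7};  // channel sitting in each dword after the hadds
  const int idx[8] = {0, 4, 1, 5, 2, 6, 3, 7};  // vpermute_mask, lowest element first
  for (int i = 0; i < 8; i++) {
    printf("%d ", src[idx[i]]);                 // prints: 0 1 2 3 4 5 6 7
  }
  printf("\n");
  return 0;
}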
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-3x8c8-xw-minmax-fp32-avx2.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/MRx8c8-avx2.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <immintrin.h> #include <xnnpack/gemm.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/math.h> #include <xnnpack/unaligned.h> void xnn_qs8_qc8w_gemm_xw_minmax_fp32_ukernel_3x8c8__avx2( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 8 * sizeof(int8_t)); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } do { const __m128i vbias0x0 = _mm_cvtsi32_si128(((const int*) w)[0]); const __m128i vbias0x1 = _mm_cvtsi32_si128(((const int*) w)[1]); __m256i vacc0x01 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x0), vbias0x1, 1); const __m128i vbias0x2 = _mm_cvtsi32_si128(((const int*) w)[2]); const __m128i vbias0x3 = _mm_cvtsi32_si128(((const int*) w)[3]); __m256i vacc0x23 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x2), vbias0x3, 1); const __m128i vbias0x4 = _mm_cvtsi32_si128(((const int*) w)[4]); const __m128i vbias0x5 = _mm_cvtsi32_si128(((const int*) w)[5]); __m256i vacc0x45 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x4), vbias0x5, 1); const __m128i vbias0x6 = _mm_cvtsi32_si128(((const int*) w)[6]); const __m128i vbias0x7 = _mm_cvtsi32_si128(((const int*) w)[7]); __m256i vacc0x67 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x6), vbias0x7, 1); __m256i vacc1x01 = vacc0x01; __m256i vacc1x23 = vacc0x23; __m256i vacc1x45 = vacc0x45; __m256i vacc1x67 = vacc0x67; __m256i vacc2x01 = vacc0x01; __m256i vacc2x23 = vacc0x23; __m256i vacc2x45 = vacc0x45; __m256i vacc2x67 = vacc0x67; w = (const int32_t*) w + 8; size_t k = 0; while (k < kc) { const __m128i va0 = _mm_broadcastq_epi64(_mm_loadl_epi64((const __m128i*) a0)); const __m256i vxa0 = _mm256_cvtepi8_epi16(va0); a0 += 8; const __m128i va1 = _mm_broadcastq_epi64(_mm_loadl_epi64((const __m128i*) a1)); const __m256i vxa1 = _mm256_cvtepi8_epi16(va1); a1 += 8; const __m128i va2 = _mm_broadcastq_epi64(_mm_loadl_epi64((const __m128i*) a2)); const __m256i vxa2 = _mm256_cvtepi8_epi16(va2); a2 += 8; const __m256i vxb01 = _mm256_load_si256((const __m256i*) w); vacc0x01 = _mm256_add_epi32(vacc0x01, _mm256_madd_epi16(vxa0, vxb01)); vacc1x01 = _mm256_add_epi32(vacc1x01, _mm256_madd_epi16(vxa1, vxb01)); vacc2x01 = _mm256_add_epi32(vacc2x01, _mm256_madd_epi16(vxa2, vxb01)); const __m256i vxb23 = _mm256_load_si256((const __m256i*) ((const int16_t*) w + 16)); vacc0x23 = _mm256_add_epi32(vacc0x23, _mm256_madd_epi16(vxa0, vxb23)); vacc1x23 = _mm256_add_epi32(vacc1x23, _mm256_madd_epi16(vxa1, vxb23)); vacc2x23 = _mm256_add_epi32(vacc2x23, _mm256_madd_epi16(vxa2, vxb23)); const __m256i vxb45 = _mm256_load_si256((const __m256i*) 
((const int16_t*) w + 32)); vacc0x45 = _mm256_add_epi32(vacc0x45, _mm256_madd_epi16(vxa0, vxb45)); vacc1x45 = _mm256_add_epi32(vacc1x45, _mm256_madd_epi16(vxa1, vxb45)); vacc2x45 = _mm256_add_epi32(vacc2x45, _mm256_madd_epi16(vxa2, vxb45)); const __m256i vxb67 = _mm256_load_si256((const __m256i*) ((const int16_t*) w + 48)); vacc0x67 = _mm256_add_epi32(vacc0x67, _mm256_madd_epi16(vxa0, vxb67)); vacc1x67 = _mm256_add_epi32(vacc1x67, _mm256_madd_epi16(vxa1, vxb67)); vacc2x67 = _mm256_add_epi32(vacc2x67, _mm256_madd_epi16(vxa2, vxb67)); w = (const void*) ((const int16_t*) w + 64); k += 8 * sizeof(int8_t); } const __m256i vacc0x0213 = _mm256_hadd_epi32(vacc0x01, vacc0x23); const __m256i vacc0x4657 = _mm256_hadd_epi32(vacc0x45, vacc0x67); const __m256i vacc1x0213 = _mm256_hadd_epi32(vacc1x01, vacc1x23); const __m256i vacc1x4657 = _mm256_hadd_epi32(vacc1x45, vacc1x67); const __m256i vacc2x0213 = _mm256_hadd_epi32(vacc2x01, vacc2x23); const __m256i vacc2x4657 = _mm256_hadd_epi32(vacc2x45, vacc2x67); const __m256i vacc0x02461357 = _mm256_hadd_epi32(vacc0x0213, vacc0x4657); const __m256i vacc1x02461357 = _mm256_hadd_epi32(vacc1x0213, vacc1x4657); const __m256i vacc2x02461357 = _mm256_hadd_epi32(vacc2x0213, vacc2x4657); const __m256i vpermute_mask = _mm256_set_epi32(7, 3, 6, 2, 5, 1, 4, 0); __m256i vacc0x01234567 = _mm256_permutevar8x32_epi32(vacc0x02461357, vpermute_mask); __m256i vacc1x01234567 = _mm256_permutevar8x32_epi32(vacc1x02461357, vpermute_mask); __m256i vacc2x01234567 = _mm256_permutevar8x32_epi32(vacc2x02461357, vpermute_mask); __m256 vscaled0x01234567 = _mm256_cvtepi32_ps(vacc0x01234567); __m256 vscaled1x01234567 = _mm256_cvtepi32_ps(vacc1x01234567); __m256 vscaled2x01234567 = _mm256_cvtepi32_ps(vacc2x01234567); const __m256 vscale01234567 = _mm256_load_ps(w); w = (const void*) ((const float*) w + 8); vscaled0x01234567 = _mm256_mul_ps(vscaled0x01234567, vscale01234567); vscaled1x01234567 = _mm256_mul_ps(vscaled1x01234567, vscale01234567); vscaled2x01234567 = _mm256_mul_ps(vscaled2x01234567, vscale01234567); const __m256 voutput_max_less_zero_point = _mm256_load_ps(params->fp32_avx2.output_max_less_zero_point); vscaled0x01234567 = _mm256_min_ps(vscaled0x01234567, voutput_max_less_zero_point); vscaled1x01234567 = _mm256_min_ps(vscaled1x01234567, voutput_max_less_zero_point); vscaled2x01234567 = _mm256_min_ps(vscaled2x01234567, voutput_max_less_zero_point); vacc0x01234567 = _mm256_cvtps_epi32(vscaled0x01234567); vacc1x01234567 = _mm256_cvtps_epi32(vscaled1x01234567); vacc2x01234567 = _mm256_cvtps_epi32(vscaled2x01234567); const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->fp32_avx2.output_zero_point); __m256i vacc01x01234567 = _mm256_adds_epi16(_mm256_packs_epi32(vacc0x01234567, vacc1x01234567), voutput_zero_point); __m256i vacc22x01234567 = _mm256_adds_epi16(_mm256_packs_epi32(vacc2x01234567, vacc2x01234567), voutput_zero_point); vacc01x01234567 = _mm256_permute4x64_epi64(vacc01x01234567, _MM_SHUFFLE(3, 1, 2, 0)); vacc22x01234567 = _mm256_permute4x64_epi64(vacc22x01234567, _MM_SHUFFLE(3, 1, 2, 0)); __m256i vout = _mm256_packs_epi16(vacc01x01234567, vacc22x01234567); vout = _mm256_max_epi8(vout, _mm256_load_si256((const __m256i*) params->fp32_avx2.output_min)); __m128i vout_lo = _mm256_castsi256_si128(vout); __m128i vout_hi = _mm256_extracti128_si256(vout, 1); if (nc >= 8) { _mm_storel_epi64((__m128i*) c0, vout_lo); _mm_storel_epi64((__m128i*) c1, vout_hi); _mm_storeh_pi((__m64*) c2, _mm_castsi128_ps(vout_lo)); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = 
(int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); nc -= 8; } else { if (nc & 4) { _mm_storeu_si32(c0, vout_lo); _mm_storeu_si32(c1, vout_hi); unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout_lo, 2)); c0 += 4; c1 += 4; c2 += 4; vout_lo = _mm_srli_epi64(vout_lo, 32); vout_hi = _mm_srli_epi64(vout_hi, 32); } if (nc & 2) { unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout_lo, 0)); unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout_hi, 0)); unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout_lo, 4)); c0 += 2; c1 += 2; c2 += 2; vout_lo = _mm_srli_epi32(vout_lo, 16); vout_hi = _mm_srli_epi32(vout_hi, 16); } if (nc & 1) { *c0 = (int8_t) _mm_extract_epi8(vout_lo, 0); *c1 = (int8_t) _mm_extract_epi8(vout_hi, 0); *c2 = (int8_t) _mm_extract_epi8(vout_lo, 8); } nc = 0; } } while (nc != 0); }
8,898
40.390698
120
c
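The xw ("extended weights") variant above differs from the plain 3x8c8 AVX2 kernel only in the inner loop: weights are stored pre-widened to int16, so each _mm256_load_si256 replaces an int8 load plus _mm256_cvtepi8_epi16, trading twice the weight footprint for less per-call conversion work. A minimal sketch of that one-time widening (illustrative only; not XNNPACK's actual packing routine, which also interleaves channels and inserts biases and scales):

#include <stddef.h>
#include <stdint.h>

static void widen_weights(const int8_t* w8, int16_t* w16, size_t n) {
  for (size_t i = 0; i < n; i++) {
    w16[i] = (int16_t) w8[i];  // sign-extend once at pack time instead of on every GEMM call
  }
}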
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x16c4-minmax-fp32-neondot.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/c4-neondot.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/gemm.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/math.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x16c4__neondot( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 4 * sizeof(int8_t)); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride); int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { a3 = a2; c3 = c2; } // Loop over groups of 16 columns. do { // Initialize accumulators with bias. 16 bias values are loaded from the // weight matrix, at the start of the group of 16 columns. int32x4_t vacc0x0123 = vld1q_s32(w); w = (const int32_t*) w + 4; int32x4_t vacc0x4567 = vld1q_s32(w); w = (const int32_t*) w + 4; int32x4_t vacc0x89AB = vld1q_s32(w); w = (const int32_t*) w + 4; int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const int32_t*) w + 4; int32x4_t vacc1x0123 = vacc0x0123; int32x4_t vacc1x4567 = vacc0x4567; int32x4_t vacc1x89AB = vacc0x89AB; int32x4_t vacc1xCDEF = vacc0xCDEF; int32x4_t vacc2x0123 = vacc0x0123; int32x4_t vacc2x4567 = vacc0x4567; int32x4_t vacc2x89AB = vacc0x89AB; int32x4_t vacc2xCDEF = vacc0xCDEF; int32x4_t vacc3x0123 = vacc0x0123; int32x4_t vacc3x4567 = vacc0x4567; int32x4_t vacc3x89AB = vacc0x89AB; int32x4_t vacc3xCDEF = vacc0xCDEF; // Inner accumulation loop along the 16 columns. size_t k = kc; // 2x partial unrolled loop to load 8 bytes at a time. while (k >= 8 * sizeof(int8_t)) { // Load a 4x8 block of activations. const int8x8_t va0x01234567 = vld1_s8(a0); a0 += 8; const int8x8_t va1x01234567 = vld1_s8(a1); a1 += 8; const int8x8_t va2x01234567 = vld1_s8(a2); a2 += 8; const int8x8_t va3x01234567 = vld1_s8(a3); a3 += 8; // Load a 8x16 block of weights. const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16; const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16; const int8x16_t vb0123x89AB = vld1q_s8(w); w = (const int8_t*) w + 16; const int8x16_t vb0123xCDEF = vld1q_s8(w); w = (const int8_t*) w + 16; const int8x16_t vb4567x0123 = vld1q_s8(w); w = (const int8_t*) w + 16; const int8x16_t vb4567x4567 = vld1q_s8(w); w = (const int8_t*) w + 16; const int8x16_t vb4567x89AB = vld1q_s8(w); w = (const int8_t*) w + 16; const int8x16_t vb4567xCDEF = vld1q_s8(w); w = (const int8_t*) w + 16; // Multiply-accumulate: 4x8 * 8x16 --> 4x16. 
vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0); vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0); vacc0x89AB = vdotq_lane_s32(vacc0x89AB, vb0123x89AB, va0x01234567, 0); vacc0xCDEF = vdotq_lane_s32(vacc0xCDEF, vb0123xCDEF, va0x01234567, 0); vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb0123x0123, va1x01234567, 0); vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb0123x4567, va1x01234567, 0); vacc1x89AB = vdotq_lane_s32(vacc1x89AB, vb0123x89AB, va1x01234567, 0); vacc1xCDEF = vdotq_lane_s32(vacc1xCDEF, vb0123xCDEF, va1x01234567, 0); vacc2x0123 = vdotq_lane_s32(vacc2x0123, vb0123x0123, va2x01234567, 0); vacc2x4567 = vdotq_lane_s32(vacc2x4567, vb0123x4567, va2x01234567, 0); vacc2x89AB = vdotq_lane_s32(vacc2x89AB, vb0123x89AB, va2x01234567, 0); vacc2xCDEF = vdotq_lane_s32(vacc2xCDEF, vb0123xCDEF, va2x01234567, 0); vacc3x0123 = vdotq_lane_s32(vacc3x0123, vb0123x0123, va3x01234567, 0); vacc3x4567 = vdotq_lane_s32(vacc3x4567, vb0123x4567, va3x01234567, 0); vacc3x89AB = vdotq_lane_s32(vacc3x89AB, vb0123x89AB, va3x01234567, 0); vacc3xCDEF = vdotq_lane_s32(vacc3xCDEF, vb0123xCDEF, va3x01234567, 0); vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb4567x0123, va0x01234567, 1); vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb4567x4567, va0x01234567, 1); vacc0x89AB = vdotq_lane_s32(vacc0x89AB, vb4567x89AB, va0x01234567, 1); vacc0xCDEF = vdotq_lane_s32(vacc0xCDEF, vb4567xCDEF, va0x01234567, 1); vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb4567x0123, va1x01234567, 1); vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb4567x4567, va1x01234567, 1); vacc1x89AB = vdotq_lane_s32(vacc1x89AB, vb4567x89AB, va1x01234567, 1); vacc1xCDEF = vdotq_lane_s32(vacc1xCDEF, vb4567xCDEF, va1x01234567, 1); vacc2x0123 = vdotq_lane_s32(vacc2x0123, vb4567x0123, va2x01234567, 1); vacc2x4567 = vdotq_lane_s32(vacc2x4567, vb4567x4567, va2x01234567, 1); vacc2x89AB = vdotq_lane_s32(vacc2x89AB, vb4567x89AB, va2x01234567, 1); vacc2xCDEF = vdotq_lane_s32(vacc2xCDEF, vb4567xCDEF, va2x01234567, 1); vacc3x0123 = vdotq_lane_s32(vacc3x0123, vb4567x0123, va3x01234567, 1); vacc3x4567 = vdotq_lane_s32(vacc3x4567, vb4567x4567, va3x01234567, 1); vacc3x89AB = vdotq_lane_s32(vacc3x89AB, vb4567x89AB, va3x01234567, 1); vacc3xCDEF = vdotq_lane_s32(vacc3xCDEF, vb4567xCDEF, va3x01234567, 1); k -= 8 * sizeof(int8_t); } // Handle up to 4 final positions of `k` if XNN_UNLIKELY(k != 0) { // Load a 4x4 block of activations. const int8x8_t va0x01234567 = vld1_s8(a0); a0 += 4; const int8x8_t va1x01234567 = vld1_s8(a1); a1 += 4; const int8x8_t va2x01234567 = vld1_s8(a2); a2 += 4; const int8x8_t va3x01234567 = vld1_s8(a3); a3 += 4; // Load a 4x16 block of weights. const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16; const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16; const int8x16_t vb0123x89AB = vld1q_s8(w); w = (const int8_t*) w + 16; const int8x16_t vb0123xCDEF = vld1q_s8(w); w = (const int8_t*) w + 16; // Multiply-accumulate: 4x4 * 4x16 --> 4x16. 
vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0); vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0); vacc0x89AB = vdotq_lane_s32(vacc0x89AB, vb0123x89AB, va0x01234567, 0); vacc0xCDEF = vdotq_lane_s32(vacc0xCDEF, vb0123xCDEF, va0x01234567, 0); vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb0123x0123, va1x01234567, 0); vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb0123x4567, va1x01234567, 0); vacc1x89AB = vdotq_lane_s32(vacc1x89AB, vb0123x89AB, va1x01234567, 0); vacc1xCDEF = vdotq_lane_s32(vacc1xCDEF, vb0123xCDEF, va1x01234567, 0); vacc2x0123 = vdotq_lane_s32(vacc2x0123, vb0123x0123, va2x01234567, 0); vacc2x4567 = vdotq_lane_s32(vacc2x4567, vb0123x4567, va2x01234567, 0); vacc2x89AB = vdotq_lane_s32(vacc2x89AB, vb0123x89AB, va2x01234567, 0); vacc2xCDEF = vdotq_lane_s32(vacc2xCDEF, vb0123xCDEF, va2x01234567, 0); vacc3x0123 = vdotq_lane_s32(vacc3x0123, vb0123x0123, va3x01234567, 0); vacc3x4567 = vdotq_lane_s32(vacc3x4567, vb0123x4567, va3x01234567, 0); vacc3x89AB = vdotq_lane_s32(vacc3x89AB, vb0123x89AB, va3x01234567, 0); vacc3xCDEF = vdotq_lane_s32(vacc3xCDEF, vb0123xCDEF, va3x01234567, 0); } float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123); float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567); float32x4_t vfpacc0x89AB = vcvtq_f32_s32(vacc0x89AB); float32x4_t vfpacc0xCDEF = vcvtq_f32_s32(vacc0xCDEF); float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123); float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567); float32x4_t vfpacc1x89AB = vcvtq_f32_s32(vacc1x89AB); float32x4_t vfpacc1xCDEF = vcvtq_f32_s32(vacc1xCDEF); float32x4_t vfpacc2x0123 = vcvtq_f32_s32(vacc2x0123); float32x4_t vfpacc2x4567 = vcvtq_f32_s32(vacc2x4567); float32x4_t vfpacc2x89AB = vcvtq_f32_s32(vacc2x89AB); float32x4_t vfpacc2xCDEF = vcvtq_f32_s32(vacc2xCDEF); float32x4_t vfpacc3x0123 = vcvtq_f32_s32(vacc3x0123); float32x4_t vfpacc3x4567 = vcvtq_f32_s32(vacc3x4567); float32x4_t vfpacc3x89AB = vcvtq_f32_s32(vacc3x89AB); float32x4_t vfpacc3xCDEF = vcvtq_f32_s32(vacc3xCDEF); const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const float*) w + 4; vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123); vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale0123); vfpacc2x0123 = vmulq_f32(vfpacc2x0123, vscale0123); vfpacc3x0123 = vmulq_f32(vfpacc3x0123, vscale0123); const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const float*) w + 4; vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567); vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale4567); vfpacc2x4567 = vmulq_f32(vfpacc2x4567, vscale4567); vfpacc3x4567 = vmulq_f32(vfpacc3x4567, vscale4567); const float32x4_t vscale89AB = vld1q_f32((const float*) w); w = (const float*) w + 4; vfpacc0x89AB = vmulq_f32(vfpacc0x89AB, vscale89AB); vfpacc1x89AB = vmulq_f32(vfpacc1x89AB, vscale89AB); vfpacc2x89AB = vmulq_f32(vfpacc2x89AB, vscale89AB); vfpacc3x89AB = vmulq_f32(vfpacc3x89AB, vscale89AB); const float32x4_t vscaleCDEF = vld1q_f32((const float*) w); w = (const float*) w + 4; vfpacc0xCDEF = vmulq_f32(vfpacc0xCDEF, vscaleCDEF); vfpacc1xCDEF = vmulq_f32(vfpacc1xCDEF, vscaleCDEF); vfpacc2xCDEF = vmulq_f32(vfpacc2xCDEF, vscaleCDEF); vfpacc3xCDEF = vmulq_f32(vfpacc3xCDEF, vscaleCDEF); vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123); vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567); vacc0x89AB = vcvtnq_s32_f32(vfpacc0x89AB); vacc0xCDEF = vcvtnq_s32_f32(vfpacc0xCDEF); vacc1x0123 = vcvtnq_s32_f32(vfpacc1x0123); vacc1x4567 = vcvtnq_s32_f32(vfpacc1x4567); vacc1x89AB = vcvtnq_s32_f32(vfpacc1x89AB); vacc1xCDEF = vcvtnq_s32_f32(vfpacc1xCDEF); vacc2x0123 = 
vcvtnq_s32_f32(vfpacc2x0123); vacc2x4567 = vcvtnq_s32_f32(vfpacc2x4567); vacc2x89AB = vcvtnq_s32_f32(vfpacc2x89AB); vacc2xCDEF = vcvtnq_s32_f32(vfpacc2xCDEF); vacc3x0123 = vcvtnq_s32_f32(vfpacc3x0123); vacc3x4567 = vcvtnq_s32_f32(vfpacc3x4567); vacc3x89AB = vcvtnq_s32_f32(vfpacc3x89AB); vacc3xCDEF = vcvtnq_s32_f32(vfpacc3xCDEF); const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point); #if XNN_ARCH_ARM64 const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point); const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point); const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point); const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point); const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point); const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF), voutput_zero_point); const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point); const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x89AB), vacc3xCDEF), voutput_zero_point); int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF); int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF); int8x16_t vout2x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc2x89ABCDEF); int8x16_t vout3x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc3x01234567), vacc3x89ABCDEF); #else const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point); const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point); const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point); const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point); const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point); const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)), voutput_zero_point); const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point); const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x89AB), vqmovn_s32(vacc3xCDEF)), voutput_zero_point); int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF)); int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF)); int8x16_t vout2x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc2x89ABCDEF)); int8x16_t vout3x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc3x01234567), vqmovn_s16(vacc3x89ABCDEF)); #endif const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neonv8.output_min); const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neonv8.output_max); vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min); vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min); vout2x0123456789ABCDEF = 
vmaxq_s8(vout2x0123456789ABCDEF, voutput_min); vout3x0123456789ABCDEF = vmaxq_s8(vout3x0123456789ABCDEF, voutput_min); vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max); vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max); vout2x0123456789ABCDEF = vminq_s8(vout2x0123456789ABCDEF, voutput_max); vout3x0123456789ABCDEF = vminq_s8(vout3x0123456789ABCDEF, voutput_max); if (nc >= 16) { // Main case where the 16 columns fit in the destination. vst1q_s8(c0 + 0, vout0x0123456789ABCDEF); vst1q_s8(c1 + 0, vout1x0123456789ABCDEF); vst1q_s8(c2 + 0, vout2x0123456789ABCDEF); vst1q_s8(c3 + 0, vout3x0123456789ABCDEF); // Advance to the next 16 columns. c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); a3 = (const int8_t*) ((uintptr_t) a3 - kc); nc -= 16; } else { // Final case where not all of the 16 columns fit in the destination. int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF)); int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vget_low_s8(vout2x0123456789ABCDEF), vget_low_s8(vout3x0123456789ABCDEF)); if (nc & 8) { vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8; vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8; vst1_s8(c2, vget_low_s8(vout2x01234567_3x01234567)); c2 += 8; vst1_s8(c3, vget_high_s8(vout2x01234567_3x01234567)); c3 += 8; vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF)); vout2x01234567_3x01234567 = vcombine_s8(vget_high_s8(vout2x0123456789ABCDEF), vget_high_s8(vout3x0123456789ABCDEF)); } if (nc & 4) { vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4; vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4; vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4; vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4; vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4); vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4); } if (nc & 2) { vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2; vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2; vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2; vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2; vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2); vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2); } if (nc & 1) { vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0); vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8); vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0); vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8); } nc = 0; } } while (nc != 0); }
18,117
54.237805
132
c
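Each vdotq_lane_s32 in the kernel above folds four int8 multiply-adds into every 32-bit accumulator lane, which is how the kernel covers K in steps of 4. Below is a standalone sketch of what a single such instruction computes (not XNNPACK code; assumes an AArch64 toolchain with the dot-product extension, e.g. -march=armv8.2-a+dotprod):

// Lane 0 of the 8-byte activation vector selects int8 values a[0..3]; each
// 32-bit lane of the result gains the dot product of those 4 activations
// with the corresponding group of 4 weights.
#include <arm_neon.h>
#include <stdio.h>

int main(void) {
  const int8_t a[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  const int8_t b[16] = {
    1, 1, 1, 1,   // weights for output column 0
    2, 2, 2, 2,   // column 1
    3, 3, 3, 3,   // column 2
    4, 4, 4, 4,   // column 3
  };
  int32x4_t acc = vdupq_n_s32(0);
  acc = vdotq_lane_s32(acc, vld1q_s8(b), vld1_s8(a), 0);
  // Expected: (1+2+3+4) * {1,2,3,4} = {10, 20, 30, 40}
  int32_t out[4];
  vst1q_s32(out, acc);
  printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]);
  return 0;
}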
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x2-minmax-fp32-scalar-fmagic.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/scalar.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> #include <xnnpack/unaligned.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x2__scalar_fmagic( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride); int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { a3 = a2; c3 = c2; } do { int32_t vacc0x0 = unaligned_indexed_load_s32(w, 0); int32_t vacc0x1 = unaligned_indexed_load_s32(w, 1); int32_t vacc1x0 = vacc0x0; int32_t vacc1x1 = vacc0x1; int32_t vacc2x0 = vacc0x0; int32_t vacc2x1 = vacc0x1; int32_t vacc3x0 = vacc0x0; int32_t vacc3x1 = vacc0x1; w = (const int32_t*) w + 2; size_t k = kc; do { const int32_t va0 = (int32_t) *a0++; const int32_t va1 = (int32_t) *a1++; const int32_t va2 = (int32_t) *a2++; const int32_t va3 = (int32_t) *a3++; const int32_t vb0 = (int32_t) ((const int8_t*) w)[0]; const int32_t vb1 = (int32_t) ((const int8_t*) w)[1]; w = (const int8_t*) w + 2; vacc0x0 += va0 * vb0; vacc0x1 += va0 * vb1; vacc1x0 += va1 * vb0; vacc1x1 += va1 * vb1; vacc2x0 += va2 * vb0; vacc2x1 += va2 * vb1; vacc3x0 += va3 * vb0; vacc3x1 += va3 * vb1; k -= sizeof(int8_t); } while (k != 0); float vfpacc0x0 = (float) vacc0x0; float vfpacc0x1 = (float) vacc0x1; float vfpacc1x0 = (float) vacc1x0; float vfpacc1x1 = (float) vacc1x1; float vfpacc2x0 = (float) vacc2x0; float vfpacc2x1 = (float) vacc2x1; float vfpacc3x0 = (float) vacc3x0; float vfpacc3x1 = (float) vacc3x1; const float vscale0 = unaligned_indexed_load_f32(w, 0); vfpacc0x0 *= vscale0; vfpacc1x0 *= vscale0; vfpacc2x0 *= vscale0; vfpacc3x0 *= vscale0; const float vscale1 = unaligned_indexed_load_f32(w, 1); vfpacc0x1 *= vscale1; vfpacc1x1 *= vscale1; vfpacc2x1 *= vscale1; vfpacc3x1 *= vscale1; w = (const void*) ((const float*) w + 2); const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point; vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point); vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point); vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point); vfpacc1x1 = math_max_f32(vfpacc1x1, voutput_min_less_zero_point); vfpacc2x0 = math_max_f32(vfpacc2x0, voutput_min_less_zero_point); vfpacc2x1 = math_max_f32(vfpacc2x1, voutput_min_less_zero_point); vfpacc3x0 = math_max_f32(vfpacc3x0, voutput_min_less_zero_point); vfpacc3x1 = math_max_f32(vfpacc3x1, voutput_min_less_zero_point); const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point; vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point); vfpacc0x1 = math_min_f32(vfpacc0x1, 
voutput_max_less_zero_point); vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point); vfpacc1x1 = math_min_f32(vfpacc1x1, voutput_max_less_zero_point); vfpacc2x0 = math_min_f32(vfpacc2x0, voutput_max_less_zero_point); vfpacc2x1 = math_min_f32(vfpacc2x1, voutput_max_less_zero_point); vfpacc3x0 = math_min_f32(vfpacc3x0, voutput_max_less_zero_point); vfpacc3x1 = math_min_f32(vfpacc3x1, voutput_max_less_zero_point); const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias; vfpacc0x0 += vmagic_bias; vfpacc0x1 += vmagic_bias; vfpacc1x0 += vmagic_bias; vfpacc1x1 += vmagic_bias; vfpacc2x0 += vmagic_bias; vfpacc2x1 += vmagic_bias; vfpacc3x0 += vmagic_bias; vfpacc3x1 += vmagic_bias; const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point; int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0) - vmagic_bias_less_output_zero_point; int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1) - vmagic_bias_less_output_zero_point; int32_t vout1x0 = (int32_t) float_as_uint32(vfpacc1x0) - vmagic_bias_less_output_zero_point; int32_t vout1x1 = (int32_t) float_as_uint32(vfpacc1x1) - vmagic_bias_less_output_zero_point; int32_t vout2x0 = (int32_t) float_as_uint32(vfpacc2x0) - vmagic_bias_less_output_zero_point; int32_t vout2x1 = (int32_t) float_as_uint32(vfpacc2x1) - vmagic_bias_less_output_zero_point; int32_t vout3x0 = (int32_t) float_as_uint32(vfpacc3x0) - vmagic_bias_less_output_zero_point; int32_t vout3x1 = (int32_t) float_as_uint32(vfpacc3x1) - vmagic_bias_less_output_zero_point; if XNN_LIKELY(nc >= 2) { c0[0] = (int8_t) vout0x0; c0[1] = (int8_t) vout0x1; c1[0] = (int8_t) vout1x0; c1[1] = (int8_t) vout1x1; c2[0] = (int8_t) vout2x0; c2[1] = (int8_t) vout2x1; c3[0] = (int8_t) vout3x0; c3[1] = (int8_t) vout3x1; a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); a3 = (const int8_t*) ((uintptr_t) a3 - kc); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); nc -= 2; } else { if (nc & 1) { c0[0] = (int8_t) vout0x0; c1[0] = (int8_t) vout1x0; c2[0] = (int8_t) vout2x0; c3[0] = (int8_t) vout3x0; } nc = 0; } } while (nc != 0); }
6,508
34.568306
116
c
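The fmagic epilogue above rounds by exploiting IEEE-754 addition: adding magic_bias = 0x1.8p23f (12582912.0f) pushes the value into a range where the float's unit in the last place is 1.0, so round-to-nearest-even happens during the add and the rounded integer sits in the low mantissa bits. One subtraction of magic_bias_less_output_zero_point then yields the requantized, zero-point-shifted result. A standalone sketch of the trick (not XNNPACK code; the zero point of -1 is an arbitrary illustration value):

// Adding 0x1.8p23f to a float in a small range forces round-to-nearest-even
// into the low mantissa bits, so the integer result can be read straight
// out of the bit pattern. 0x4B400000 is the bit pattern of 12582912.0f.
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static int32_t fmagic_round(float x, int32_t zero_point) {
  const float magic_bias = 12582912.0f;  // 0x1.8p23
  // plays the role of magic_bias_less_output_zero_point in the kernel params
  const int32_t magic_bias_less_zp = (int32_t) 0x4B400000 - zero_point;
  x += magic_bias;                       // rounding happens here
  uint32_t bits;
  memcpy(&bits, &x, sizeof(bits));       // float_as_uint32
  return (int32_t) bits - magic_bias_less_zp;
}

int main(void) {
  printf("%d\n", fmagic_round(2.5f, -1));   // 2.5 rounds to 2 (ties to even); 2 + (-1) = 1
  printf("%d\n", fmagic_round(3.5f, -1));   // 3.5 rounds to 4; 4 + (-1) = 3
  printf("%d\n", fmagic_round(-2.5f, -1));  // -2.5 rounds to -2; -2 + (-1) = -3
  return 0;
}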
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x2-minmax-fp32-scalar-imagic.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/scalar.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> #include <xnnpack/unaligned.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x2__scalar_imagic( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride); int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { a3 = a2; c3 = c2; } do { int32_t vacc0x0 = unaligned_indexed_load_s32(w, 0); int32_t vacc0x1 = unaligned_indexed_load_s32(w, 1); int32_t vacc1x0 = vacc0x0; int32_t vacc1x1 = vacc0x1; int32_t vacc2x0 = vacc0x0; int32_t vacc2x1 = vacc0x1; int32_t vacc3x0 = vacc0x0; int32_t vacc3x1 = vacc0x1; w = (const int32_t*) w + 2; size_t k = kc; do { const int32_t va0 = (int32_t) *a0++; const int32_t va1 = (int32_t) *a1++; const int32_t va2 = (int32_t) *a2++; const int32_t va3 = (int32_t) *a3++; const int32_t vb0 = (int32_t) ((const int8_t*) w)[0]; const int32_t vb1 = (int32_t) ((const int8_t*) w)[1]; w = (const int8_t*) w + 2; vacc0x0 += va0 * vb0; vacc0x1 += va0 * vb1; vacc1x0 += va1 * vb0; vacc1x1 += va1 * vb1; vacc2x0 += va2 * vb0; vacc2x1 += va2 * vb1; vacc3x0 += va3 * vb0; vacc3x1 += va3 * vb1; k -= sizeof(int8_t); } while (k != 0); float vfpacc0x0 = (float) vacc0x0; float vfpacc0x1 = (float) vacc0x1; float vfpacc1x0 = (float) vacc1x0; float vfpacc1x1 = (float) vacc1x1; float vfpacc2x0 = (float) vacc2x0; float vfpacc2x1 = (float) vacc2x1; float vfpacc3x0 = (float) vacc3x0; float vfpacc3x1 = (float) vacc3x1; const float vscale0 = unaligned_indexed_load_f32(w, 0); vfpacc0x0 *= vscale0; vfpacc1x0 *= vscale0; vfpacc2x0 *= vscale0; vfpacc3x0 *= vscale0; const float vscale1 = unaligned_indexed_load_f32(w, 1); vfpacc0x1 *= vscale1; vfpacc1x1 *= vscale1; vfpacc2x1 *= vscale1; vfpacc3x1 *= vscale1; w = (const void*) ((const float*) w + 2); const float vmagic_bias = params->fp32_scalar_imagic.magic_bias; vfpacc0x0 += vmagic_bias; vfpacc0x1 += vmagic_bias; vfpacc1x0 += vmagic_bias; vfpacc1x1 += vmagic_bias; vfpacc2x0 += vmagic_bias; vfpacc2x1 += vmagic_bias; vfpacc3x0 += vmagic_bias; vfpacc3x1 += vmagic_bias; int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0); int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1); int32_t vout1x0 = (int32_t) float_as_uint32(vfpacc1x0); int32_t vout1x1 = (int32_t) float_as_uint32(vfpacc1x1); int32_t vout2x0 = (int32_t) float_as_uint32(vfpacc2x0); int32_t vout2x1 = (int32_t) float_as_uint32(vfpacc2x1); int32_t vout3x0 = (int32_t) float_as_uint32(vfpacc3x0); int32_t vout3x1 = (int32_t) float_as_uint32(vfpacc3x1); const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min; vout0x0 = math_max_s32(vout0x0, vmagic_min); vout0x1 = 
math_max_s32(vout0x1, vmagic_min); vout1x0 = math_max_s32(vout1x0, vmagic_min); vout1x1 = math_max_s32(vout1x1, vmagic_min); vout2x0 = math_max_s32(vout2x0, vmagic_min); vout2x1 = math_max_s32(vout2x1, vmagic_min); vout3x0 = math_max_s32(vout3x0, vmagic_min); vout3x1 = math_max_s32(vout3x1, vmagic_min); const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max; vout0x0 = math_min_s32(vout0x0, vmagic_max); vout0x1 = math_min_s32(vout0x1, vmagic_max); vout1x0 = math_min_s32(vout1x0, vmagic_max); vout1x1 = math_min_s32(vout1x1, vmagic_max); vout2x0 = math_min_s32(vout2x0, vmagic_max); vout2x1 = math_min_s32(vout2x1, vmagic_max); vout3x0 = math_min_s32(vout3x0, vmagic_max); vout3x1 = math_min_s32(vout3x1, vmagic_max); const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point; vout0x0 -= vmagic_bias_less_zero_point; vout0x1 -= vmagic_bias_less_zero_point; vout1x0 -= vmagic_bias_less_zero_point; vout1x1 -= vmagic_bias_less_zero_point; vout2x0 -= vmagic_bias_less_zero_point; vout2x1 -= vmagic_bias_less_zero_point; vout3x0 -= vmagic_bias_less_zero_point; vout3x1 -= vmagic_bias_less_zero_point; if XNN_LIKELY(nc >= 2) { c0[0] = (int8_t) vout0x0; c0[1] = (int8_t) vout0x1; c1[0] = (int8_t) vout1x0; c1[1] = (int8_t) vout1x1; c2[0] = (int8_t) vout2x0; c2[1] = (int8_t) vout2x1; c3[0] = (int8_t) vout3x0; c3[1] = (int8_t) vout3x1; a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); a3 = (const int8_t*) ((uintptr_t) a3 - kc); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); nc -= 2; } else { if (nc & 1) { c0[0] = (int8_t) vout0x0; c1[0] = (int8_t) vout1x0; c2[0] = (int8_t) vout2x0; c3[0] = (int8_t) vout3x0; } nc = 0; } } while (nc != 0); }
6,151
31.041667
102
c
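The imagic variant above differs from fmagic only in where it clamps: after the bit-cast, in the integer domain. That works because for positive IEEE-754 floats a larger value always has a larger bit pattern, so once every accumulator sits in [2^23, 2^24) after the magic-bias add, integer min/max against the precomputed magic_min/magic_max bounds is equivalent to the float clamp. A standalone sketch (not XNNPACK code; the int8 output range and zero point of 0 are assumed illustration values):

// Demonstrates the imagic clamp order: bias, bit-cast, integer clamp,
// zero-point shift. bits_of() plays the role of float_as_uint32.
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static uint32_t bits_of(float x) {
  uint32_t u;
  memcpy(&u, &x, sizeof(u));
  return u;
}

int main(void) {
  const float magic_bias = 12582912.0f;  // 0x1.8p23
  // Precompute the clamp bounds the way the kernel params would, here for
  // an int8 output range of [-128, 127] with output zero point 0:
  const int32_t magic_min = (int32_t) bits_of(magic_bias - 128.0f);
  const int32_t magic_max = (int32_t) bits_of(magic_bias + 127.0f);
  const int32_t magic_bias_less_zero_point = (int32_t) bits_of(magic_bias) - 0;

  float acc = 300.75f;  // overflows int8: must clamp to 127
  int32_t v = (int32_t) bits_of(acc + magic_bias);
  v = v < magic_min ? magic_min : v;
  v = v > magic_max ? magic_max : v;
  v -= magic_bias_less_zero_point;
  printf("%d\n", v);  // prints 127
  return 0;
}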
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x2-minmax-fp32-scalar-lrintf.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/scalar.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <math.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> #include <xnnpack/unaligned.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x2__scalar_lrintf( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride); int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { a3 = a2; c3 = c2; } do { int32_t vacc0x0 = unaligned_indexed_load_s32(w, 0); int32_t vacc0x1 = unaligned_indexed_load_s32(w, 1); int32_t vacc1x0 = vacc0x0; int32_t vacc1x1 = vacc0x1; int32_t vacc2x0 = vacc0x0; int32_t vacc2x1 = vacc0x1; int32_t vacc3x0 = vacc0x0; int32_t vacc3x1 = vacc0x1; w = (const int32_t*) w + 2; size_t k = kc; do { const int32_t va0 = (int32_t) *a0++; const int32_t va1 = (int32_t) *a1++; const int32_t va2 = (int32_t) *a2++; const int32_t va3 = (int32_t) *a3++; const int32_t vb0 = (int32_t) ((const int8_t*) w)[0]; const int32_t vb1 = (int32_t) ((const int8_t*) w)[1]; w = (const int8_t*) w + 2; vacc0x0 += va0 * vb0; vacc0x1 += va0 * vb1; vacc1x0 += va1 * vb0; vacc1x1 += va1 * vb1; vacc2x0 += va2 * vb0; vacc2x1 += va2 * vb1; vacc3x0 += va3 * vb0; vacc3x1 += va3 * vb1; k -= sizeof(int8_t); } while (k != 0); float vfpacc0x0 = (float) vacc0x0; float vfpacc0x1 = (float) vacc0x1; float vfpacc1x0 = (float) vacc1x0; float vfpacc1x1 = (float) vacc1x1; float vfpacc2x0 = (float) vacc2x0; float vfpacc2x1 = (float) vacc2x1; float vfpacc3x0 = (float) vacc3x0; float vfpacc3x1 = (float) vacc3x1; const float vscale0 = unaligned_indexed_load_f32(w, 0); vfpacc0x0 *= vscale0; vfpacc1x0 *= vscale0; vfpacc2x0 *= vscale0; vfpacc3x0 *= vscale0; const float vscale1 = unaligned_indexed_load_f32(w, 1); vfpacc0x1 *= vscale1; vfpacc1x1 *= vscale1; vfpacc2x1 *= vscale1; vfpacc3x1 *= vscale1; w = (const void*) ((const float*) w + 2); const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point; vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point); vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point); vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point); vfpacc1x1 = math_max_f32(vfpacc1x1, voutput_min_less_zero_point); vfpacc2x0 = math_max_f32(vfpacc2x0, voutput_min_less_zero_point); vfpacc2x1 = math_max_f32(vfpacc2x1, voutput_min_less_zero_point); vfpacc3x0 = math_max_f32(vfpacc3x0, voutput_min_less_zero_point); vfpacc3x1 = math_max_f32(vfpacc3x1, voutput_min_less_zero_point); const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point; vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point); vfpacc0x1 = math_min_f32(vfpacc0x1, 
voutput_max_less_zero_point); vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point); vfpacc1x1 = math_min_f32(vfpacc1x1, voutput_max_less_zero_point); vfpacc2x0 = math_min_f32(vfpacc2x0, voutput_max_less_zero_point); vfpacc2x1 = math_min_f32(vfpacc2x1, voutput_max_less_zero_point); vfpacc3x0 = math_min_f32(vfpacc3x0, voutput_max_less_zero_point); vfpacc3x1 = math_min_f32(vfpacc3x1, voutput_max_less_zero_point); const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0); const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1); const int32_t vrndacc1x0 = (int32_t) lrintf(vfpacc1x0); const int32_t vrndacc1x1 = (int32_t) lrintf(vfpacc1x1); const int32_t vrndacc2x0 = (int32_t) lrintf(vfpacc2x0); const int32_t vrndacc2x1 = (int32_t) lrintf(vfpacc2x1); const int32_t vrndacc3x0 = (int32_t) lrintf(vfpacc3x0); const int32_t vrndacc3x1 = (int32_t) lrintf(vfpacc3x1); const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point; int32_t vout0x0 = vrndacc0x0 + voutput_zero_point; int32_t vout0x1 = vrndacc0x1 + voutput_zero_point; int32_t vout1x0 = vrndacc1x0 + voutput_zero_point; int32_t vout1x1 = vrndacc1x1 + voutput_zero_point; int32_t vout2x0 = vrndacc2x0 + voutput_zero_point; int32_t vout2x1 = vrndacc2x1 + voutput_zero_point; int32_t vout3x0 = vrndacc3x0 + voutput_zero_point; int32_t vout3x1 = vrndacc3x1 + voutput_zero_point; if XNN_LIKELY(nc >= 2) { c0[0] = (int8_t) vout0x0; c0[1] = (int8_t) vout0x1; c1[0] = (int8_t) vout1x0; c1[1] = (int8_t) vout1x1; c2[0] = (int8_t) vout2x0; c2[1] = (int8_t) vout2x1; c3[0] = (int8_t) vout3x0; c3[1] = (int8_t) vout3x1; a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); a3 = (const int8_t*) ((uintptr_t) a3 - kc); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); nc -= 2; } else { if (nc & 1) { c0[0] = (int8_t) vout0x0; c1[0] = (int8_t) vout1x0; c2[0] = (int8_t) vout2x0; c3[0] = (int8_t) vout3x0; } nc = 0; } } while (nc != 0); }
6,329
33.590164
100
c
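The lrintf variant above keeps the clamp in the float domain, against bounds that already have the zero point subtracted, and delegates rounding to the C library. A standalone reference of the per-element requantization it performs (not XNNPACK code; parameter names only mirror the kernel's):

// Scale, clamp in the zero-point-relative float domain, round with the
// current FP rounding mode (round-to-nearest-even by default), then shift
// by the output zero point.
#include <math.h>
#include <stdint.h>
#include <stdio.h>

static int8_t requantize_lrintf(int32_t acc, float scale,
                                int8_t out_min, int8_t out_max,
                                int8_t zero_point) {
  float x = (float) acc * scale;
  const float lo = (float) ((int32_t) out_min - (int32_t) zero_point);
  const float hi = (float) ((int32_t) out_max - (int32_t) zero_point);
  x = x < lo ? lo : x;
  x = x > hi ? hi : x;
  return (int8_t) ((int32_t) lrintf(x) + (int32_t) zero_point);
}

int main(void) {
  // acc = 1000 with a per-channel scale of 0.01 -> 10 + zero point
  printf("%d\n", requantize_lrintf(1000, 0.01f, -128, 127, 5));    // 15
  // a large accumulator saturates at out_max
  printf("%d\n", requantize_lrintf(100000, 0.01f, -128, 127, 5));  // 127
  return 0;
}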
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x2-minmax-fp32-wasm-fmagic.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/scalar.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> #include <xnnpack/unaligned.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x2__wasm_fmagic( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride); int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { a3 = a2; c3 = c2; } do { int32_t vacc0x0 = unaligned_indexed_load_s32(w, 0); int32_t vacc0x1 = unaligned_indexed_load_s32(w, 1); int32_t vacc1x0 = vacc0x0; int32_t vacc1x1 = vacc0x1; int32_t vacc2x0 = vacc0x0; int32_t vacc2x1 = vacc0x1; int32_t vacc3x0 = vacc0x0; int32_t vacc3x1 = vacc0x1; w = (const int32_t*) w + 2; size_t k = kc; do { const int32_t va0 = (int32_t) *a0++; const int32_t va1 = (int32_t) *a1++; const int32_t va2 = (int32_t) *a2++; const int32_t va3 = (int32_t) *a3++; const int32_t vb0 = (int32_t) ((const int8_t*) w)[0]; const int32_t vb1 = (int32_t) ((const int8_t*) w)[1]; w = (const int8_t*) w + 2; vacc0x0 += va0 * vb0; vacc0x1 += va0 * vb1; vacc1x0 += va1 * vb0; vacc1x1 += va1 * vb1; vacc2x0 += va2 * vb0; vacc2x1 += va2 * vb1; vacc3x0 += va3 * vb0; vacc3x1 += va3 * vb1; k -= sizeof(int8_t); } while (k != 0); float vfpacc0x0 = (float) vacc0x0; float vfpacc0x1 = (float) vacc0x1; float vfpacc1x0 = (float) vacc1x0; float vfpacc1x1 = (float) vacc1x1; float vfpacc2x0 = (float) vacc2x0; float vfpacc2x1 = (float) vacc2x1; float vfpacc3x0 = (float) vacc3x0; float vfpacc3x1 = (float) vacc3x1; const float vscale0 = unaligned_indexed_load_f32(w, 0); vfpacc0x0 *= vscale0; vfpacc1x0 *= vscale0; vfpacc2x0 *= vscale0; vfpacc3x0 *= vscale0; const float vscale1 = unaligned_indexed_load_f32(w, 1); vfpacc0x1 *= vscale1; vfpacc1x1 *= vscale1; vfpacc2x1 *= vscale1; vfpacc3x1 *= vscale1; w = (const void*) ((const float*) w + 2); const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point; vfpacc0x0 = __builtin_wasm_max_f32(vfpacc0x0, voutput_min_less_zero_point); vfpacc0x1 = __builtin_wasm_max_f32(vfpacc0x1, voutput_min_less_zero_point); vfpacc1x0 = __builtin_wasm_max_f32(vfpacc1x0, voutput_min_less_zero_point); vfpacc1x1 = __builtin_wasm_max_f32(vfpacc1x1, voutput_min_less_zero_point); vfpacc2x0 = __builtin_wasm_max_f32(vfpacc2x0, voutput_min_less_zero_point); vfpacc2x1 = __builtin_wasm_max_f32(vfpacc2x1, voutput_min_less_zero_point); vfpacc3x0 = __builtin_wasm_max_f32(vfpacc3x0, voutput_min_less_zero_point); vfpacc3x1 = __builtin_wasm_max_f32(vfpacc3x1, voutput_min_less_zero_point); const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point; vfpacc0x0 = 
__builtin_wasm_min_f32(vfpacc0x0, voutput_max_less_zero_point); vfpacc0x1 = __builtin_wasm_min_f32(vfpacc0x1, voutput_max_less_zero_point); vfpacc1x0 = __builtin_wasm_min_f32(vfpacc1x0, voutput_max_less_zero_point); vfpacc1x1 = __builtin_wasm_min_f32(vfpacc1x1, voutput_max_less_zero_point); vfpacc2x0 = __builtin_wasm_min_f32(vfpacc2x0, voutput_max_less_zero_point); vfpacc2x1 = __builtin_wasm_min_f32(vfpacc2x1, voutput_max_less_zero_point); vfpacc3x0 = __builtin_wasm_min_f32(vfpacc3x0, voutput_max_less_zero_point); vfpacc3x1 = __builtin_wasm_min_f32(vfpacc3x1, voutput_max_less_zero_point); const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias; vfpacc0x0 += vmagic_bias; vfpacc0x1 += vmagic_bias; vfpacc1x0 += vmagic_bias; vfpacc1x1 += vmagic_bias; vfpacc2x0 += vmagic_bias; vfpacc2x1 += vmagic_bias; vfpacc3x0 += vmagic_bias; vfpacc3x1 += vmagic_bias; const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point; int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0) - vmagic_bias_less_output_zero_point; int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1) - vmagic_bias_less_output_zero_point; int32_t vout1x0 = (int32_t) float_as_uint32(vfpacc1x0) - vmagic_bias_less_output_zero_point; int32_t vout1x1 = (int32_t) float_as_uint32(vfpacc1x1) - vmagic_bias_less_output_zero_point; int32_t vout2x0 = (int32_t) float_as_uint32(vfpacc2x0) - vmagic_bias_less_output_zero_point; int32_t vout2x1 = (int32_t) float_as_uint32(vfpacc2x1) - vmagic_bias_less_output_zero_point; int32_t vout3x0 = (int32_t) float_as_uint32(vfpacc3x0) - vmagic_bias_less_output_zero_point; int32_t vout3x1 = (int32_t) float_as_uint32(vfpacc3x1) - vmagic_bias_less_output_zero_point; if XNN_LIKELY(nc >= 2) { c0[0] = (int8_t) vout0x0; c0[1] = (int8_t) vout0x1; c1[0] = (int8_t) vout1x0; c1[1] = (int8_t) vout1x1; c2[0] = (int8_t) vout2x0; c2[1] = (int8_t) vout2x1; c3[0] = (int8_t) vout3x0; c3[1] = (int8_t) vout3x1; a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); a3 = (const int8_t*) ((uintptr_t) a3 - kc); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); nc -= 2; } else { if (nc & 1) { c0[0] = (int8_t) vout0x0; c1[0] = (int8_t) vout1x0; c2[0] = (int8_t) vout2x0; c3[0] = (int8_t) vout3x0; } nc = 0; } } while (nc != 0); }
6,666
35.431694
116
c
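All of the 4x2 kernels above, including this wasm one, advance w through the same packed layout: per group of NR = 2 columns, two int32 biases, then kc steps of two int8 weights, then two float per-channel scales, with the next column group following immediately. The layout here is inferred from the kernels' pointer arithmetic rather than from XNNPACK's packing code; below is a standalone sketch that parses one such group (memcpy stands in for the unaligned_* helpers):

// Reads one NR=2 column group: 2 int32 biases, kc*2 int8 weights,
// 2 float scales.
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static const void* walk_column_group(const void* w, size_t kc) {
  const uint8_t* p = (const uint8_t*) w;
  int32_t bias[2];
  memcpy(bias, p, sizeof(bias)); p += sizeof(bias);
  printf("bias: %d %d\n", bias[0], bias[1]);
  for (size_t k = 0; k < kc; k++) {
    printf("k=%zu weights: %d %d\n", k, (int) (int8_t) p[0], (int) (int8_t) p[1]);
    p += 2;
  }
  float scale[2];
  memcpy(scale, p, sizeof(scale)); p += sizeof(scale);
  printf("scales: %g %g\n", scale[0], scale[1]);
  return p;  // start of the next column group
}

int main(void) {
  // Hand-packed buffer for kc = 2: biases {7, -7}, 4 weights, 2 scales.
  uint8_t buf[8 + 4 + 8];
  const int32_t bias[2] = {7, -7};
  memcpy(buf, bias, 8);
  const int8_t wts[4] = {1, -2, 3, -4};
  memcpy(buf + 8, wts, 4);
  const float sc[2] = {0.5f, 0.25f};
  memcpy(buf + 12, sc, 8);
  walk_column_group(buf, 2);
  return 0;
}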
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x4-minmax-fp32-scalar-fmagic.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/scalar.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x4__scalar_fmagic( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride); int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { a3 = a2; c3 = c2; } do { int32_t vacc0x0 = ((const int32_t*) w)[0]; int32_t vacc0x1 = ((const int32_t*) w)[1]; int32_t vacc0x2 = ((const int32_t*) w)[2]; int32_t vacc0x3 = ((const int32_t*) w)[3]; int32_t vacc1x0 = vacc0x0; int32_t vacc1x1 = vacc0x1; int32_t vacc1x2 = vacc0x2; int32_t vacc1x3 = vacc0x3; int32_t vacc2x0 = vacc0x0; int32_t vacc2x1 = vacc0x1; int32_t vacc2x2 = vacc0x2; int32_t vacc2x3 = vacc0x3; int32_t vacc3x0 = vacc0x0; int32_t vacc3x1 = vacc0x1; int32_t vacc3x2 = vacc0x2; int32_t vacc3x3 = vacc0x3; w = (const int32_t*) w + 4; size_t k = kc; do { const int32_t va0 = (int32_t) *a0++; const int32_t va1 = (int32_t) *a1++; const int32_t va2 = (int32_t) *a2++; const int32_t va3 = (int32_t) *a3++; const int32_t vb0 = (int32_t) ((const int8_t*) w)[0]; const int32_t vb1 = (int32_t) ((const int8_t*) w)[1]; const int32_t vb2 = (int32_t) ((const int8_t*) w)[2]; const int32_t vb3 = (int32_t) ((const int8_t*) w)[3]; w = (const int8_t*) w + 4; vacc0x0 += va0 * vb0; vacc0x1 += va0 * vb1; vacc0x2 += va0 * vb2; vacc0x3 += va0 * vb3; vacc1x0 += va1 * vb0; vacc1x1 += va1 * vb1; vacc1x2 += va1 * vb2; vacc1x3 += va1 * vb3; vacc2x0 += va2 * vb0; vacc2x1 += va2 * vb1; vacc2x2 += va2 * vb2; vacc2x3 += va2 * vb3; vacc3x0 += va3 * vb0; vacc3x1 += va3 * vb1; vacc3x2 += va3 * vb2; vacc3x3 += va3 * vb3; k -= sizeof(int8_t); } while (k != 0); float vfpacc0x0 = (float) vacc0x0; float vfpacc0x1 = (float) vacc0x1; float vfpacc0x2 = (float) vacc0x2; float vfpacc0x3 = (float) vacc0x3; float vfpacc1x0 = (float) vacc1x0; float vfpacc1x1 = (float) vacc1x1; float vfpacc1x2 = (float) vacc1x2; float vfpacc1x3 = (float) vacc1x3; float vfpacc2x0 = (float) vacc2x0; float vfpacc2x1 = (float) vacc2x1; float vfpacc2x2 = (float) vacc2x2; float vfpacc2x3 = (float) vacc2x3; float vfpacc3x0 = (float) vacc3x0; float vfpacc3x1 = (float) vacc3x1; float vfpacc3x2 = (float) vacc3x2; float vfpacc3x3 = (float) vacc3x3; const float vscale0 = ((const float*) w)[0]; vfpacc0x0 *= vscale0; vfpacc1x0 *= vscale0; vfpacc2x0 *= vscale0; vfpacc3x0 *= vscale0; const float vscale1 = ((const float*) w)[1]; vfpacc0x1 *= vscale1; vfpacc1x1 *= vscale1; vfpacc2x1 *= vscale1; vfpacc3x1 *= vscale1; const float vscale2 = ((const float*) w)[2]; vfpacc0x2 *= vscale2; vfpacc1x2 *= vscale2; vfpacc2x2 *= vscale2; vfpacc3x2 *= vscale2; const 
float vscale3 = ((const float*) w)[3]; vfpacc0x3 *= vscale3; vfpacc1x3 *= vscale3; vfpacc2x3 *= vscale3; vfpacc3x3 *= vscale3; w = (const void*) ((const float*) w + 4); const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point; vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point); vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point); vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point); vfpacc0x3 = math_max_f32(vfpacc0x3, voutput_min_less_zero_point); vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point); vfpacc1x1 = math_max_f32(vfpacc1x1, voutput_min_less_zero_point); vfpacc1x2 = math_max_f32(vfpacc1x2, voutput_min_less_zero_point); vfpacc1x3 = math_max_f32(vfpacc1x3, voutput_min_less_zero_point); vfpacc2x0 = math_max_f32(vfpacc2x0, voutput_min_less_zero_point); vfpacc2x1 = math_max_f32(vfpacc2x1, voutput_min_less_zero_point); vfpacc2x2 = math_max_f32(vfpacc2x2, voutput_min_less_zero_point); vfpacc2x3 = math_max_f32(vfpacc2x3, voutput_min_less_zero_point); vfpacc3x0 = math_max_f32(vfpacc3x0, voutput_min_less_zero_point); vfpacc3x1 = math_max_f32(vfpacc3x1, voutput_min_less_zero_point); vfpacc3x2 = math_max_f32(vfpacc3x2, voutput_min_less_zero_point); vfpacc3x3 = math_max_f32(vfpacc3x3, voutput_min_less_zero_point); const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point; vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point); vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point); vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point); vfpacc0x3 = math_min_f32(vfpacc0x3, voutput_max_less_zero_point); vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point); vfpacc1x1 = math_min_f32(vfpacc1x1, voutput_max_less_zero_point); vfpacc1x2 = math_min_f32(vfpacc1x2, voutput_max_less_zero_point); vfpacc1x3 = math_min_f32(vfpacc1x3, voutput_max_less_zero_point); vfpacc2x0 = math_min_f32(vfpacc2x0, voutput_max_less_zero_point); vfpacc2x1 = math_min_f32(vfpacc2x1, voutput_max_less_zero_point); vfpacc2x2 = math_min_f32(vfpacc2x2, voutput_max_less_zero_point); vfpacc2x3 = math_min_f32(vfpacc2x3, voutput_max_less_zero_point); vfpacc3x0 = math_min_f32(vfpacc3x0, voutput_max_less_zero_point); vfpacc3x1 = math_min_f32(vfpacc3x1, voutput_max_less_zero_point); vfpacc3x2 = math_min_f32(vfpacc3x2, voutput_max_less_zero_point); vfpacc3x3 = math_min_f32(vfpacc3x3, voutput_max_less_zero_point); const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias; vfpacc0x0 += vmagic_bias; vfpacc0x1 += vmagic_bias; vfpacc0x2 += vmagic_bias; vfpacc0x3 += vmagic_bias; vfpacc1x0 += vmagic_bias; vfpacc1x1 += vmagic_bias; vfpacc1x2 += vmagic_bias; vfpacc1x3 += vmagic_bias; vfpacc2x0 += vmagic_bias; vfpacc2x1 += vmagic_bias; vfpacc2x2 += vmagic_bias; vfpacc2x3 += vmagic_bias; vfpacc3x0 += vmagic_bias; vfpacc3x1 += vmagic_bias; vfpacc3x2 += vmagic_bias; vfpacc3x3 += vmagic_bias; const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point; int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0) - vmagic_bias_less_output_zero_point; int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1) - vmagic_bias_less_output_zero_point; int32_t vout0x2 = (int32_t) float_as_uint32(vfpacc0x2) - vmagic_bias_less_output_zero_point; int32_t vout0x3 = (int32_t) float_as_uint32(vfpacc0x3) - vmagic_bias_less_output_zero_point; int32_t vout1x0 = (int32_t) float_as_uint32(vfpacc1x0) - vmagic_bias_less_output_zero_point; int32_t vout1x1 = 
(int32_t) float_as_uint32(vfpacc1x1) - vmagic_bias_less_output_zero_point; int32_t vout1x2 = (int32_t) float_as_uint32(vfpacc1x2) - vmagic_bias_less_output_zero_point; int32_t vout1x3 = (int32_t) float_as_uint32(vfpacc1x3) - vmagic_bias_less_output_zero_point; int32_t vout2x0 = (int32_t) float_as_uint32(vfpacc2x0) - vmagic_bias_less_output_zero_point; int32_t vout2x1 = (int32_t) float_as_uint32(vfpacc2x1) - vmagic_bias_less_output_zero_point; int32_t vout2x2 = (int32_t) float_as_uint32(vfpacc2x2) - vmagic_bias_less_output_zero_point; int32_t vout2x3 = (int32_t) float_as_uint32(vfpacc2x3) - vmagic_bias_less_output_zero_point; int32_t vout3x0 = (int32_t) float_as_uint32(vfpacc3x0) - vmagic_bias_less_output_zero_point; int32_t vout3x1 = (int32_t) float_as_uint32(vfpacc3x1) - vmagic_bias_less_output_zero_point; int32_t vout3x2 = (int32_t) float_as_uint32(vfpacc3x2) - vmagic_bias_less_output_zero_point; int32_t vout3x3 = (int32_t) float_as_uint32(vfpacc3x3) - vmagic_bias_less_output_zero_point; if XNN_LIKELY(nc >= 4) { c0[0] = (int8_t) vout0x0; c0[1] = (int8_t) vout0x1; c0[2] = (int8_t) vout0x2; c0[3] = (int8_t) vout0x3; c1[0] = (int8_t) vout1x0; c1[1] = (int8_t) vout1x1; c1[2] = (int8_t) vout1x2; c1[3] = (int8_t) vout1x3; c2[0] = (int8_t) vout2x0; c2[1] = (int8_t) vout2x1; c2[2] = (int8_t) vout2x2; c2[3] = (int8_t) vout2x3; c3[0] = (int8_t) vout3x0; c3[1] = (int8_t) vout3x1; c3[2] = (int8_t) vout3x2; c3[3] = (int8_t) vout3x3; a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); a3 = (const int8_t*) ((uintptr_t) a3 - kc); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); nc -= 4; } else { if (nc & 2) { c0[0] = (int8_t) vout0x0; c0[1] = (int8_t) vout0x1; vout0x0 = vout0x2; c0 += 2; c1[0] = (int8_t) vout1x0; c1[1] = (int8_t) vout1x1; vout1x0 = vout1x2; c1 += 2; c2[0] = (int8_t) vout2x0; c2[1] = (int8_t) vout2x1; vout2x0 = vout2x2; c2 += 2; c3[0] = (int8_t) vout3x0; c3[1] = (int8_t) vout3x1; vout3x0 = vout3x2; c3 += 2; } if (nc & 1) { c0[0] = (int8_t) vout0x0; c1[0] = (int8_t) vout1x0; c2[0] = (int8_t) vout2x0; c3[0] = (int8_t) vout3x0; } nc = 0; } } while (nc != 0); }
10,547
37.217391
116
c
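For checking a tile produced by kernels like the 4x4 fmagic one above, a plain scalar reference that performs the same math (int32 accumulation from a bias, per-channel scale, clamp, round-to-nearest-even, zero-point shift) is a convenient oracle. This is a standalone sketch, not XNNPACK code; strides are in elements rather than bytes for simplicity:

#include <math.h>
#include <stdint.h>
#include <stdio.h>

// Reference mr x nr tile of a qs8/qc8w GEMM; b is packed with nr
// consecutive weights per k step, matching the kernels' read order.
static void ref_gemm_qs8_qc8w(size_t mr, size_t nr, size_t kc,
                              const int8_t* a, size_t a_stride,
                              const int32_t* bias, const int8_t* b,
                              const float* scale, int8_t zero_point,
                              int8_t out_min, int8_t out_max,
                              int8_t* c, size_t cm_stride) {
  for (size_t m = 0; m < mr; m++) {
    for (size_t n = 0; n < nr; n++) {
      int32_t acc = bias[n];
      for (size_t k = 0; k < kc; k++) {
        acc += (int32_t) a[m * a_stride + k] * (int32_t) b[k * nr + n];
      }
      float x = (float) acc * scale[n];
      const float lo = (float) (out_min - zero_point);
      const float hi = (float) (out_max - zero_point);
      x = x < lo ? lo : (x > hi ? hi : x);
      // lrintf rounds to nearest even under the default FP mode, the same
      // result the fmagic trick produces
      c[m * cm_stride + n] = (int8_t) ((int32_t) lrintf(x) + zero_point);
    }
  }
}

int main(void) {
  const int8_t a[2 * 3] = {1, 2, 3, 4, 5, 6};
  const int32_t bias[2] = {0, 10};
  const int8_t b[3 * 2] = {1, 1, 1, 1, 1, 1};
  const float scale[2] = {1.0f, 0.5f};
  int8_t c[2 * 2];
  ref_gemm_qs8_qc8w(2, 2, 3, a, 3, bias, b, scale, 0, -128, 127, c, 2);
  printf("%d %d / %d %d\n", c[0], c[1], c[2], c[3]);  // 6 8 / 15 12
  return 0;
}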
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x4-minmax-fp32-scalar-imagic.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/scalar.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x4__scalar_imagic( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride); int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { a3 = a2; c3 = c2; } do { int32_t vacc0x0 = ((const int32_t*) w)[0]; int32_t vacc0x1 = ((const int32_t*) w)[1]; int32_t vacc0x2 = ((const int32_t*) w)[2]; int32_t vacc0x3 = ((const int32_t*) w)[3]; int32_t vacc1x0 = vacc0x0; int32_t vacc1x1 = vacc0x1; int32_t vacc1x2 = vacc0x2; int32_t vacc1x3 = vacc0x3; int32_t vacc2x0 = vacc0x0; int32_t vacc2x1 = vacc0x1; int32_t vacc2x2 = vacc0x2; int32_t vacc2x3 = vacc0x3; int32_t vacc3x0 = vacc0x0; int32_t vacc3x1 = vacc0x1; int32_t vacc3x2 = vacc0x2; int32_t vacc3x3 = vacc0x3; w = (const int32_t*) w + 4; size_t k = kc; do { const int32_t va0 = (int32_t) *a0++; const int32_t va1 = (int32_t) *a1++; const int32_t va2 = (int32_t) *a2++; const int32_t va3 = (int32_t) *a3++; const int32_t vb0 = (int32_t) ((const int8_t*) w)[0]; const int32_t vb1 = (int32_t) ((const int8_t*) w)[1]; const int32_t vb2 = (int32_t) ((const int8_t*) w)[2]; const int32_t vb3 = (int32_t) ((const int8_t*) w)[3]; w = (const int8_t*) w + 4; vacc0x0 += va0 * vb0; vacc0x1 += va0 * vb1; vacc0x2 += va0 * vb2; vacc0x3 += va0 * vb3; vacc1x0 += va1 * vb0; vacc1x1 += va1 * vb1; vacc1x2 += va1 * vb2; vacc1x3 += va1 * vb3; vacc2x0 += va2 * vb0; vacc2x1 += va2 * vb1; vacc2x2 += va2 * vb2; vacc2x3 += va2 * vb3; vacc3x0 += va3 * vb0; vacc3x1 += va3 * vb1; vacc3x2 += va3 * vb2; vacc3x3 += va3 * vb3; k -= sizeof(int8_t); } while (k != 0); float vfpacc0x0 = (float) vacc0x0; float vfpacc0x1 = (float) vacc0x1; float vfpacc0x2 = (float) vacc0x2; float vfpacc0x3 = (float) vacc0x3; float vfpacc1x0 = (float) vacc1x0; float vfpacc1x1 = (float) vacc1x1; float vfpacc1x2 = (float) vacc1x2; float vfpacc1x3 = (float) vacc1x3; float vfpacc2x0 = (float) vacc2x0; float vfpacc2x1 = (float) vacc2x1; float vfpacc2x2 = (float) vacc2x2; float vfpacc2x3 = (float) vacc2x3; float vfpacc3x0 = (float) vacc3x0; float vfpacc3x1 = (float) vacc3x1; float vfpacc3x2 = (float) vacc3x2; float vfpacc3x3 = (float) vacc3x3; const float vscale0 = ((const float*) w)[0]; vfpacc0x0 *= vscale0; vfpacc1x0 *= vscale0; vfpacc2x0 *= vscale0; vfpacc3x0 *= vscale0; const float vscale1 = ((const float*) w)[1]; vfpacc0x1 *= vscale1; vfpacc1x1 *= vscale1; vfpacc2x1 *= vscale1; vfpacc3x1 *= vscale1; const float vscale2 = ((const float*) w)[2]; vfpacc0x2 *= vscale2; vfpacc1x2 *= vscale2; vfpacc2x2 *= vscale2; vfpacc3x2 *= vscale2; const 
float vscale3 = ((const float*) w)[3]; vfpacc0x3 *= vscale3; vfpacc1x3 *= vscale3; vfpacc2x3 *= vscale3; vfpacc3x3 *= vscale3; w = (const void*) ((const float*) w + 4); const float vmagic_bias = params->fp32_scalar_imagic.magic_bias; vfpacc0x0 += vmagic_bias; vfpacc0x1 += vmagic_bias; vfpacc0x2 += vmagic_bias; vfpacc0x3 += vmagic_bias; vfpacc1x0 += vmagic_bias; vfpacc1x1 += vmagic_bias; vfpacc1x2 += vmagic_bias; vfpacc1x3 += vmagic_bias; vfpacc2x0 += vmagic_bias; vfpacc2x1 += vmagic_bias; vfpacc2x2 += vmagic_bias; vfpacc2x3 += vmagic_bias; vfpacc3x0 += vmagic_bias; vfpacc3x1 += vmagic_bias; vfpacc3x2 += vmagic_bias; vfpacc3x3 += vmagic_bias; int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0); int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1); int32_t vout0x2 = (int32_t) float_as_uint32(vfpacc0x2); int32_t vout0x3 = (int32_t) float_as_uint32(vfpacc0x3); int32_t vout1x0 = (int32_t) float_as_uint32(vfpacc1x0); int32_t vout1x1 = (int32_t) float_as_uint32(vfpacc1x1); int32_t vout1x2 = (int32_t) float_as_uint32(vfpacc1x2); int32_t vout1x3 = (int32_t) float_as_uint32(vfpacc1x3); int32_t vout2x0 = (int32_t) float_as_uint32(vfpacc2x0); int32_t vout2x1 = (int32_t) float_as_uint32(vfpacc2x1); int32_t vout2x2 = (int32_t) float_as_uint32(vfpacc2x2); int32_t vout2x3 = (int32_t) float_as_uint32(vfpacc2x3); int32_t vout3x0 = (int32_t) float_as_uint32(vfpacc3x0); int32_t vout3x1 = (int32_t) float_as_uint32(vfpacc3x1); int32_t vout3x2 = (int32_t) float_as_uint32(vfpacc3x2); int32_t vout3x3 = (int32_t) float_as_uint32(vfpacc3x3); const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min; vout0x0 = math_max_s32(vout0x0, vmagic_min); vout0x1 = math_max_s32(vout0x1, vmagic_min); vout0x2 = math_max_s32(vout0x2, vmagic_min); vout0x3 = math_max_s32(vout0x3, vmagic_min); vout1x0 = math_max_s32(vout1x0, vmagic_min); vout1x1 = math_max_s32(vout1x1, vmagic_min); vout1x2 = math_max_s32(vout1x2, vmagic_min); vout1x3 = math_max_s32(vout1x3, vmagic_min); vout2x0 = math_max_s32(vout2x0, vmagic_min); vout2x1 = math_max_s32(vout2x1, vmagic_min); vout2x2 = math_max_s32(vout2x2, vmagic_min); vout2x3 = math_max_s32(vout2x3, vmagic_min); vout3x0 = math_max_s32(vout3x0, vmagic_min); vout3x1 = math_max_s32(vout3x1, vmagic_min); vout3x2 = math_max_s32(vout3x2, vmagic_min); vout3x3 = math_max_s32(vout3x3, vmagic_min); const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max; vout0x0 = math_min_s32(vout0x0, vmagic_max); vout0x1 = math_min_s32(vout0x1, vmagic_max); vout0x2 = math_min_s32(vout0x2, vmagic_max); vout0x3 = math_min_s32(vout0x3, vmagic_max); vout1x0 = math_min_s32(vout1x0, vmagic_max); vout1x1 = math_min_s32(vout1x1, vmagic_max); vout1x2 = math_min_s32(vout1x2, vmagic_max); vout1x3 = math_min_s32(vout1x3, vmagic_max); vout2x0 = math_min_s32(vout2x0, vmagic_max); vout2x1 = math_min_s32(vout2x1, vmagic_max); vout2x2 = math_min_s32(vout2x2, vmagic_max); vout2x3 = math_min_s32(vout2x3, vmagic_max); vout3x0 = math_min_s32(vout3x0, vmagic_max); vout3x1 = math_min_s32(vout3x1, vmagic_max); vout3x2 = math_min_s32(vout3x2, vmagic_max); vout3x3 = math_min_s32(vout3x3, vmagic_max); const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point; vout0x0 -= vmagic_bias_less_zero_point; vout0x1 -= vmagic_bias_less_zero_point; vout0x2 -= vmagic_bias_less_zero_point; vout0x3 -= vmagic_bias_less_zero_point; vout1x0 -= vmagic_bias_less_zero_point; vout1x1 -= vmagic_bias_less_zero_point; vout1x2 -= vmagic_bias_less_zero_point; vout1x3 -= vmagic_bias_less_zero_point; vout2x0 -= 
vmagic_bias_less_zero_point; vout2x1 -= vmagic_bias_less_zero_point; vout2x2 -= vmagic_bias_less_zero_point; vout2x3 -= vmagic_bias_less_zero_point; vout3x0 -= vmagic_bias_less_zero_point; vout3x1 -= vmagic_bias_less_zero_point; vout3x2 -= vmagic_bias_less_zero_point; vout3x3 -= vmagic_bias_less_zero_point; if XNN_LIKELY(nc >= 4) { c0[0] = (int8_t) vout0x0; c0[1] = (int8_t) vout0x1; c0[2] = (int8_t) vout0x2; c0[3] = (int8_t) vout0x3; c1[0] = (int8_t) vout1x0; c1[1] = (int8_t) vout1x1; c1[2] = (int8_t) vout1x2; c1[3] = (int8_t) vout1x3; c2[0] = (int8_t) vout2x0; c2[1] = (int8_t) vout2x1; c2[2] = (int8_t) vout2x2; c2[3] = (int8_t) vout2x3; c3[0] = (int8_t) vout3x0; c3[1] = (int8_t) vout3x1; c3[2] = (int8_t) vout3x2; c3[3] = (int8_t) vout3x3; a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); a3 = (const int8_t*) ((uintptr_t) a3 - kc); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); nc -= 4; } else { if (nc & 2) { c0[0] = (int8_t) vout0x0; c0[1] = (int8_t) vout0x1; vout0x0 = vout0x2; c0 += 2; c1[0] = (int8_t) vout1x0; c1[1] = (int8_t) vout1x1; vout1x0 = vout1x2; c1 += 2; c2[0] = (int8_t) vout2x0; c2[1] = (int8_t) vout2x1; vout2x0 = vout2x2; c2 += 2; c3[0] = (int8_t) vout3x0; c3[1] = (int8_t) vout3x1; vout3x0 = vout3x2; c3 += 2; } if (nc & 1) { c0[0] = (int8_t) vout0x0; c1[0] = (int8_t) vout1x0; c2[0] = (int8_t) vout2x0; c3[0] = (int8_t) vout3x0; } nc = 0; } } while (nc != 0); }
9,910
32.825939
102
c
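The remainder path shared by the 4x4 kernels above writes an optional pair of columns followed by an optional single column, sliding lane 2 into lane 0 after the pair so the final nc & 1 step can always read lane 0 regardless of which branch ran. A standalone sketch of that tail logic (not XNNPACK code):

// With NR = 4, a remainder of nc in {1, 2, 3} is handled as an optional
// 2-byte store plus an optional 1-byte store.
#include <stdint.h>
#include <stdio.h>

static void store_tail(int8_t* c, size_t nc,
                       int32_t v0, int32_t v1, int32_t v2, int32_t v3) {
  (void) v3;  // lane 3 is only written on the full-width path
  if (nc & 2) {
    c[0] = (int8_t) v0;
    c[1] = (int8_t) v1;
    v0 = v2;  // slide lane 2 down for the odd-column step
    c += 2;
  }
  if (nc & 1) {
    c[0] = (int8_t) v0;
  }
}

int main(void) {
  int8_t out[3] = {0, 0, 0};
  store_tail(out, 3, 10, 20, 30, 40);
  printf("%d %d %d\n", out[0], out[1], out[2]);  // 10 20 30
  return 0;
}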
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x4-minmax-fp32-scalar-lrintf.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/scalar.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <math.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x4__scalar_lrintf( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride); int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { a3 = a2; c3 = c2; } do { int32_t vacc0x0 = ((const int32_t*) w)[0]; int32_t vacc0x1 = ((const int32_t*) w)[1]; int32_t vacc0x2 = ((const int32_t*) w)[2]; int32_t vacc0x3 = ((const int32_t*) w)[3]; int32_t vacc1x0 = vacc0x0; int32_t vacc1x1 = vacc0x1; int32_t vacc1x2 = vacc0x2; int32_t vacc1x3 = vacc0x3; int32_t vacc2x0 = vacc0x0; int32_t vacc2x1 = vacc0x1; int32_t vacc2x2 = vacc0x2; int32_t vacc2x3 = vacc0x3; int32_t vacc3x0 = vacc0x0; int32_t vacc3x1 = vacc0x1; int32_t vacc3x2 = vacc0x2; int32_t vacc3x3 = vacc0x3; w = (const int32_t*) w + 4; size_t k = kc; do { const int32_t va0 = (int32_t) *a0++; const int32_t va1 = (int32_t) *a1++; const int32_t va2 = (int32_t) *a2++; const int32_t va3 = (int32_t) *a3++; const int32_t vb0 = (int32_t) ((const int8_t*) w)[0]; const int32_t vb1 = (int32_t) ((const int8_t*) w)[1]; const int32_t vb2 = (int32_t) ((const int8_t*) w)[2]; const int32_t vb3 = (int32_t) ((const int8_t*) w)[3]; w = (const int8_t*) w + 4; vacc0x0 += va0 * vb0; vacc0x1 += va0 * vb1; vacc0x2 += va0 * vb2; vacc0x3 += va0 * vb3; vacc1x0 += va1 * vb0; vacc1x1 += va1 * vb1; vacc1x2 += va1 * vb2; vacc1x3 += va1 * vb3; vacc2x0 += va2 * vb0; vacc2x1 += va2 * vb1; vacc2x2 += va2 * vb2; vacc2x3 += va2 * vb3; vacc3x0 += va3 * vb0; vacc3x1 += va3 * vb1; vacc3x2 += va3 * vb2; vacc3x3 += va3 * vb3; k -= sizeof(int8_t); } while (k != 0); float vfpacc0x0 = (float) vacc0x0; float vfpacc0x1 = (float) vacc0x1; float vfpacc0x2 = (float) vacc0x2; float vfpacc0x3 = (float) vacc0x3; float vfpacc1x0 = (float) vacc1x0; float vfpacc1x1 = (float) vacc1x1; float vfpacc1x2 = (float) vacc1x2; float vfpacc1x3 = (float) vacc1x3; float vfpacc2x0 = (float) vacc2x0; float vfpacc2x1 = (float) vacc2x1; float vfpacc2x2 = (float) vacc2x2; float vfpacc2x3 = (float) vacc2x3; float vfpacc3x0 = (float) vacc3x0; float vfpacc3x1 = (float) vacc3x1; float vfpacc3x2 = (float) vacc3x2; float vfpacc3x3 = (float) vacc3x3; const float vscale0 = ((const float*) w)[0]; vfpacc0x0 *= vscale0; vfpacc1x0 *= vscale0; vfpacc2x0 *= vscale0; vfpacc3x0 *= vscale0; const float vscale1 = ((const float*) w)[1]; vfpacc0x1 *= vscale1; vfpacc1x1 *= vscale1; vfpacc2x1 *= vscale1; vfpacc3x1 *= vscale1; const float vscale2 = ((const float*) w)[2]; vfpacc0x2 *= vscale2; vfpacc1x2 *= vscale2; vfpacc2x2 *= vscale2; vfpacc3x2 *= 
vscale2; const float vscale3 = ((const float*) w)[3]; vfpacc0x3 *= vscale3; vfpacc1x3 *= vscale3; vfpacc2x3 *= vscale3; vfpacc3x3 *= vscale3; w = (const void*) ((const float*) w + 4); const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point; vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point); vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point); vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point); vfpacc0x3 = math_max_f32(vfpacc0x3, voutput_min_less_zero_point); vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point); vfpacc1x1 = math_max_f32(vfpacc1x1, voutput_min_less_zero_point); vfpacc1x2 = math_max_f32(vfpacc1x2, voutput_min_less_zero_point); vfpacc1x3 = math_max_f32(vfpacc1x3, voutput_min_less_zero_point); vfpacc2x0 = math_max_f32(vfpacc2x0, voutput_min_less_zero_point); vfpacc2x1 = math_max_f32(vfpacc2x1, voutput_min_less_zero_point); vfpacc2x2 = math_max_f32(vfpacc2x2, voutput_min_less_zero_point); vfpacc2x3 = math_max_f32(vfpacc2x3, voutput_min_less_zero_point); vfpacc3x0 = math_max_f32(vfpacc3x0, voutput_min_less_zero_point); vfpacc3x1 = math_max_f32(vfpacc3x1, voutput_min_less_zero_point); vfpacc3x2 = math_max_f32(vfpacc3x2, voutput_min_less_zero_point); vfpacc3x3 = math_max_f32(vfpacc3x3, voutput_min_less_zero_point); const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point; vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point); vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point); vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point); vfpacc0x3 = math_min_f32(vfpacc0x3, voutput_max_less_zero_point); vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point); vfpacc1x1 = math_min_f32(vfpacc1x1, voutput_max_less_zero_point); vfpacc1x2 = math_min_f32(vfpacc1x2, voutput_max_less_zero_point); vfpacc1x3 = math_min_f32(vfpacc1x3, voutput_max_less_zero_point); vfpacc2x0 = math_min_f32(vfpacc2x0, voutput_max_less_zero_point); vfpacc2x1 = math_min_f32(vfpacc2x1, voutput_max_less_zero_point); vfpacc2x2 = math_min_f32(vfpacc2x2, voutput_max_less_zero_point); vfpacc2x3 = math_min_f32(vfpacc2x3, voutput_max_less_zero_point); vfpacc3x0 = math_min_f32(vfpacc3x0, voutput_max_less_zero_point); vfpacc3x1 = math_min_f32(vfpacc3x1, voutput_max_less_zero_point); vfpacc3x2 = math_min_f32(vfpacc3x2, voutput_max_less_zero_point); vfpacc3x3 = math_min_f32(vfpacc3x3, voutput_max_less_zero_point); const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0); const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1); const int32_t vrndacc0x2 = (int32_t) lrintf(vfpacc0x2); const int32_t vrndacc0x3 = (int32_t) lrintf(vfpacc0x3); const int32_t vrndacc1x0 = (int32_t) lrintf(vfpacc1x0); const int32_t vrndacc1x1 = (int32_t) lrintf(vfpacc1x1); const int32_t vrndacc1x2 = (int32_t) lrintf(vfpacc1x2); const int32_t vrndacc1x3 = (int32_t) lrintf(vfpacc1x3); const int32_t vrndacc2x0 = (int32_t) lrintf(vfpacc2x0); const int32_t vrndacc2x1 = (int32_t) lrintf(vfpacc2x1); const int32_t vrndacc2x2 = (int32_t) lrintf(vfpacc2x2); const int32_t vrndacc2x3 = (int32_t) lrintf(vfpacc2x3); const int32_t vrndacc3x0 = (int32_t) lrintf(vfpacc3x0); const int32_t vrndacc3x1 = (int32_t) lrintf(vfpacc3x1); const int32_t vrndacc3x2 = (int32_t) lrintf(vfpacc3x2); const int32_t vrndacc3x3 = (int32_t) lrintf(vfpacc3x3); const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point; int32_t vout0x0 = vrndacc0x0 + voutput_zero_point; int32_t vout0x1 = vrndacc0x1 + 
voutput_zero_point; int32_t vout0x2 = vrndacc0x2 + voutput_zero_point; int32_t vout0x3 = vrndacc0x3 + voutput_zero_point; int32_t vout1x0 = vrndacc1x0 + voutput_zero_point; int32_t vout1x1 = vrndacc1x1 + voutput_zero_point; int32_t vout1x2 = vrndacc1x2 + voutput_zero_point; int32_t vout1x3 = vrndacc1x3 + voutput_zero_point; int32_t vout2x0 = vrndacc2x0 + voutput_zero_point; int32_t vout2x1 = vrndacc2x1 + voutput_zero_point; int32_t vout2x2 = vrndacc2x2 + voutput_zero_point; int32_t vout2x3 = vrndacc2x3 + voutput_zero_point; int32_t vout3x0 = vrndacc3x0 + voutput_zero_point; int32_t vout3x1 = vrndacc3x1 + voutput_zero_point; int32_t vout3x2 = vrndacc3x2 + voutput_zero_point; int32_t vout3x3 = vrndacc3x3 + voutput_zero_point; if XNN_LIKELY(nc >= 4) { c0[0] = (int8_t) vout0x0; c0[1] = (int8_t) vout0x1; c0[2] = (int8_t) vout0x2; c0[3] = (int8_t) vout0x3; c1[0] = (int8_t) vout1x0; c1[1] = (int8_t) vout1x1; c1[2] = (int8_t) vout1x2; c1[3] = (int8_t) vout1x3; c2[0] = (int8_t) vout2x0; c2[1] = (int8_t) vout2x1; c2[2] = (int8_t) vout2x2; c2[3] = (int8_t) vout2x3; c3[0] = (int8_t) vout3x0; c3[1] = (int8_t) vout3x1; c3[2] = (int8_t) vout3x2; c3[3] = (int8_t) vout3x3; a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); a3 = (const int8_t*) ((uintptr_t) a3 - kc); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); nc -= 4; } else { if (nc & 2) { c0[0] = (int8_t) vout0x0; c0[1] = (int8_t) vout0x1; vout0x0 = vout0x2; c0 += 2; c1[0] = (int8_t) vout1x0; c1[1] = (int8_t) vout1x1; vout1x0 = vout1x2; c1 += 2; c2[0] = (int8_t) vout2x0; c2[1] = (int8_t) vout2x1; vout2x0 = vout2x2; c2 += 2; c3[0] = (int8_t) vout3x0; c3[1] = (int8_t) vout3x1; vout3x0 = vout3x2; c3 += 2; } if (nc & 1) { c0[0] = (int8_t) vout0x0; c1[0] = (int8_t) vout1x0; c2[0] = (int8_t) vout2x0; c3[0] = (int8_t) vout3x0; } nc = 0; } } while (nc != 0); }
10,272
36.221014
100
c
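
Editor's note: the record above is the scalar lrintf variant. Its requantization step converts each int32 accumulator to float, applies a per-channel scale, clamps against precomputed (output min/max minus zero point) bounds, rounds with lrintf (round-to-nearest-even under the default rounding mode), and shifts by the output zero point. A minimal single-value sketch of that step follows; requantize_lrintf and its parameter names are illustrative, not part of the kernel.

#include <math.h>
#include <stdint.h>

// Requantize one int32 accumulator to int8, mirroring the
// fp32_scalar_lrintf path in the record above.
static int8_t requantize_lrintf(int32_t acc, float scale,
                                float min_less_zp, float max_less_zp,
                                int32_t zero_point) {
  float fpacc = (float) acc * scale;                   // per-channel scaling
  fpacc = fpacc < min_less_zp ? min_less_zp : fpacc;   // clamp low
  fpacc = fpacc > max_less_zp ? max_less_zp : fpacc;   // clamp high
  const int32_t rounded = (int32_t) lrintf(fpacc);     // round-to-nearest-even
  return (int8_t) (rounded + zero_point);              // shift into int8 range
}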
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x4-minmax-fp32-wasm-fmagic.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/scalar.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x4__wasm_fmagic( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride); int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { a3 = a2; c3 = c2; } do { int32_t vacc0x0 = ((const int32_t*) w)[0]; int32_t vacc0x1 = ((const int32_t*) w)[1]; int32_t vacc0x2 = ((const int32_t*) w)[2]; int32_t vacc0x3 = ((const int32_t*) w)[3]; int32_t vacc1x0 = vacc0x0; int32_t vacc1x1 = vacc0x1; int32_t vacc1x2 = vacc0x2; int32_t vacc1x3 = vacc0x3; int32_t vacc2x0 = vacc0x0; int32_t vacc2x1 = vacc0x1; int32_t vacc2x2 = vacc0x2; int32_t vacc2x3 = vacc0x3; int32_t vacc3x0 = vacc0x0; int32_t vacc3x1 = vacc0x1; int32_t vacc3x2 = vacc0x2; int32_t vacc3x3 = vacc0x3; w = (const int32_t*) w + 4; size_t k = kc; do { const int32_t va0 = (int32_t) *a0++; const int32_t va1 = (int32_t) *a1++; const int32_t va2 = (int32_t) *a2++; const int32_t va3 = (int32_t) *a3++; const int32_t vb0 = (int32_t) ((const int8_t*) w)[0]; const int32_t vb1 = (int32_t) ((const int8_t*) w)[1]; const int32_t vb2 = (int32_t) ((const int8_t*) w)[2]; const int32_t vb3 = (int32_t) ((const int8_t*) w)[3]; w = (const int8_t*) w + 4; vacc0x0 += va0 * vb0; vacc0x1 += va0 * vb1; vacc0x2 += va0 * vb2; vacc0x3 += va0 * vb3; vacc1x0 += va1 * vb0; vacc1x1 += va1 * vb1; vacc1x2 += va1 * vb2; vacc1x3 += va1 * vb3; vacc2x0 += va2 * vb0; vacc2x1 += va2 * vb1; vacc2x2 += va2 * vb2; vacc2x3 += va2 * vb3; vacc3x0 += va3 * vb0; vacc3x1 += va3 * vb1; vacc3x2 += va3 * vb2; vacc3x3 += va3 * vb3; k -= sizeof(int8_t); } while (k != 0); float vfpacc0x0 = (float) vacc0x0; float vfpacc0x1 = (float) vacc0x1; float vfpacc0x2 = (float) vacc0x2; float vfpacc0x3 = (float) vacc0x3; float vfpacc1x0 = (float) vacc1x0; float vfpacc1x1 = (float) vacc1x1; float vfpacc1x2 = (float) vacc1x2; float vfpacc1x3 = (float) vacc1x3; float vfpacc2x0 = (float) vacc2x0; float vfpacc2x1 = (float) vacc2x1; float vfpacc2x2 = (float) vacc2x2; float vfpacc2x3 = (float) vacc2x3; float vfpacc3x0 = (float) vacc3x0; float vfpacc3x1 = (float) vacc3x1; float vfpacc3x2 = (float) vacc3x2; float vfpacc3x3 = (float) vacc3x3; const float vscale0 = ((const float*) w)[0]; vfpacc0x0 *= vscale0; vfpacc1x0 *= vscale0; vfpacc2x0 *= vscale0; vfpacc3x0 *= vscale0; const float vscale1 = ((const float*) w)[1]; vfpacc0x1 *= vscale1; vfpacc1x1 *= vscale1; vfpacc2x1 *= vscale1; vfpacc3x1 *= vscale1; const float vscale2 = ((const float*) w)[2]; vfpacc0x2 *= vscale2; vfpacc1x2 *= vscale2; vfpacc2x2 *= vscale2; vfpacc3x2 *= vscale2; const float 
vscale3 = ((const float*) w)[3]; vfpacc0x3 *= vscale3; vfpacc1x3 *= vscale3; vfpacc2x3 *= vscale3; vfpacc3x3 *= vscale3; w = (const void*) ((const float*) w + 4); const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point; vfpacc0x0 = __builtin_wasm_max_f32(vfpacc0x0, voutput_min_less_zero_point); vfpacc0x1 = __builtin_wasm_max_f32(vfpacc0x1, voutput_min_less_zero_point); vfpacc0x2 = __builtin_wasm_max_f32(vfpacc0x2, voutput_min_less_zero_point); vfpacc0x3 = __builtin_wasm_max_f32(vfpacc0x3, voutput_min_less_zero_point); vfpacc1x0 = __builtin_wasm_max_f32(vfpacc1x0, voutput_min_less_zero_point); vfpacc1x1 = __builtin_wasm_max_f32(vfpacc1x1, voutput_min_less_zero_point); vfpacc1x2 = __builtin_wasm_max_f32(vfpacc1x2, voutput_min_less_zero_point); vfpacc1x3 = __builtin_wasm_max_f32(vfpacc1x3, voutput_min_less_zero_point); vfpacc2x0 = __builtin_wasm_max_f32(vfpacc2x0, voutput_min_less_zero_point); vfpacc2x1 = __builtin_wasm_max_f32(vfpacc2x1, voutput_min_less_zero_point); vfpacc2x2 = __builtin_wasm_max_f32(vfpacc2x2, voutput_min_less_zero_point); vfpacc2x3 = __builtin_wasm_max_f32(vfpacc2x3, voutput_min_less_zero_point); vfpacc3x0 = __builtin_wasm_max_f32(vfpacc3x0, voutput_min_less_zero_point); vfpacc3x1 = __builtin_wasm_max_f32(vfpacc3x1, voutput_min_less_zero_point); vfpacc3x2 = __builtin_wasm_max_f32(vfpacc3x2, voutput_min_less_zero_point); vfpacc3x3 = __builtin_wasm_max_f32(vfpacc3x3, voutput_min_less_zero_point); const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point; vfpacc0x0 = __builtin_wasm_min_f32(vfpacc0x0, voutput_max_less_zero_point); vfpacc0x1 = __builtin_wasm_min_f32(vfpacc0x1, voutput_max_less_zero_point); vfpacc0x2 = __builtin_wasm_min_f32(vfpacc0x2, voutput_max_less_zero_point); vfpacc0x3 = __builtin_wasm_min_f32(vfpacc0x3, voutput_max_less_zero_point); vfpacc1x0 = __builtin_wasm_min_f32(vfpacc1x0, voutput_max_less_zero_point); vfpacc1x1 = __builtin_wasm_min_f32(vfpacc1x1, voutput_max_less_zero_point); vfpacc1x2 = __builtin_wasm_min_f32(vfpacc1x2, voutput_max_less_zero_point); vfpacc1x3 = __builtin_wasm_min_f32(vfpacc1x3, voutput_max_less_zero_point); vfpacc2x0 = __builtin_wasm_min_f32(vfpacc2x0, voutput_max_less_zero_point); vfpacc2x1 = __builtin_wasm_min_f32(vfpacc2x1, voutput_max_less_zero_point); vfpacc2x2 = __builtin_wasm_min_f32(vfpacc2x2, voutput_max_less_zero_point); vfpacc2x3 = __builtin_wasm_min_f32(vfpacc2x3, voutput_max_less_zero_point); vfpacc3x0 = __builtin_wasm_min_f32(vfpacc3x0, voutput_max_less_zero_point); vfpacc3x1 = __builtin_wasm_min_f32(vfpacc3x1, voutput_max_less_zero_point); vfpacc3x2 = __builtin_wasm_min_f32(vfpacc3x2, voutput_max_less_zero_point); vfpacc3x3 = __builtin_wasm_min_f32(vfpacc3x3, voutput_max_less_zero_point); const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias; vfpacc0x0 += vmagic_bias; vfpacc0x1 += vmagic_bias; vfpacc0x2 += vmagic_bias; vfpacc0x3 += vmagic_bias; vfpacc1x0 += vmagic_bias; vfpacc1x1 += vmagic_bias; vfpacc1x2 += vmagic_bias; vfpacc1x3 += vmagic_bias; vfpacc2x0 += vmagic_bias; vfpacc2x1 += vmagic_bias; vfpacc2x2 += vmagic_bias; vfpacc2x3 += vmagic_bias; vfpacc3x0 += vmagic_bias; vfpacc3x1 += vmagic_bias; vfpacc3x2 += vmagic_bias; vfpacc3x3 += vmagic_bias; const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point; int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0) - vmagic_bias_less_output_zero_point; int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1) - 
vmagic_bias_less_output_zero_point; int32_t vout0x2 = (int32_t) float_as_uint32(vfpacc0x2) - vmagic_bias_less_output_zero_point; int32_t vout0x3 = (int32_t) float_as_uint32(vfpacc0x3) - vmagic_bias_less_output_zero_point; int32_t vout1x0 = (int32_t) float_as_uint32(vfpacc1x0) - vmagic_bias_less_output_zero_point; int32_t vout1x1 = (int32_t) float_as_uint32(vfpacc1x1) - vmagic_bias_less_output_zero_point; int32_t vout1x2 = (int32_t) float_as_uint32(vfpacc1x2) - vmagic_bias_less_output_zero_point; int32_t vout1x3 = (int32_t) float_as_uint32(vfpacc1x3) - vmagic_bias_less_output_zero_point; int32_t vout2x0 = (int32_t) float_as_uint32(vfpacc2x0) - vmagic_bias_less_output_zero_point; int32_t vout2x1 = (int32_t) float_as_uint32(vfpacc2x1) - vmagic_bias_less_output_zero_point; int32_t vout2x2 = (int32_t) float_as_uint32(vfpacc2x2) - vmagic_bias_less_output_zero_point; int32_t vout2x3 = (int32_t) float_as_uint32(vfpacc2x3) - vmagic_bias_less_output_zero_point; int32_t vout3x0 = (int32_t) float_as_uint32(vfpacc3x0) - vmagic_bias_less_output_zero_point; int32_t vout3x1 = (int32_t) float_as_uint32(vfpacc3x1) - vmagic_bias_less_output_zero_point; int32_t vout3x2 = (int32_t) float_as_uint32(vfpacc3x2) - vmagic_bias_less_output_zero_point; int32_t vout3x3 = (int32_t) float_as_uint32(vfpacc3x3) - vmagic_bias_less_output_zero_point; if XNN_LIKELY(nc >= 4) { c0[0] = (int8_t) vout0x0; c0[1] = (int8_t) vout0x1; c0[2] = (int8_t) vout0x2; c0[3] = (int8_t) vout0x3; c1[0] = (int8_t) vout1x0; c1[1] = (int8_t) vout1x1; c1[2] = (int8_t) vout1x2; c1[3] = (int8_t) vout1x3; c2[0] = (int8_t) vout2x0; c2[1] = (int8_t) vout2x1; c2[2] = (int8_t) vout2x2; c2[3] = (int8_t) vout2x3; c3[0] = (int8_t) vout3x0; c3[1] = (int8_t) vout3x1; c3[2] = (int8_t) vout3x2; c3[3] = (int8_t) vout3x3; a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); a3 = (const int8_t*) ((uintptr_t) a3 - kc); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); nc -= 4; } else { if (nc & 2) { c0[0] = (int8_t) vout0x0; c0[1] = (int8_t) vout0x1; vout0x0 = vout0x2; c0 += 2; c1[0] = (int8_t) vout1x0; c1[1] = (int8_t) vout1x1; vout1x0 = vout1x2; c1 += 2; c2[0] = (int8_t) vout2x0; c2[1] = (int8_t) vout2x1; vout2x0 = vout2x2; c2 += 2; c3[0] = (int8_t) vout3x0; c3[1] = (int8_t) vout3x1; vout3x0 = vout3x2; c3 += 2; } if (nc & 1) { c0[0] = (int8_t) vout0x0; c1[0] = (int8_t) vout1x0; c2[0] = (int8_t) vout2x0; c3[0] = (int8_t) vout3x0; } nc = 0; } } while (nc != 0); }
10,865
38.369565
116
c
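
Editor's note: the wasm-fmagic record above replaces lrintf with the "magic bias" float-to-int trick. After clamping, adding 12582912.0f (0x1.8p+23) forces the fraction bits of the float to hold the rounded integer, so reinterpreting the bits and subtracting magic_bias_less_output_zero_point yields the rounded value plus the output zero point in one integer subtraction. A hedged standalone sketch follows; the helper names are mine, and it assumes the input is already clamped to the valid output range, as the kernel guarantees before this step.

#include <stdint.h>
#include <string.h>

// Bit-cast a float to uint32 without violating strict aliasing.
static uint32_t fp32_to_bits(float f) {
  uint32_t bits;
  memcpy(&bits, &f, sizeof(bits));
  return bits;
}

// Magic-bias rounding: valid only for inputs already clamped into the
// int8 output range, which the preceding min/max clamps ensure.
static int8_t requantize_fmagic(float clamped, int32_t zero_point) {
  const float magic_bias = 12582912.0f;  // 0x1.8p+23
  const int32_t magic_bias_less_zp =
      (int32_t) fp32_to_bits(magic_bias) - zero_point;
  return (int8_t) ((int32_t) fp32_to_bits(clamped + magic_bias)
                   - magic_bias_less_zp);
}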
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x4c2-minmax-fp32-avx-ld128.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/MRx4c2-sse.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <smmintrin.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> #include <xnnpack/unaligned.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x4c2__avx_ld128( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 2 * sizeof(int8_t)); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride); int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { a3 = a2; c3 = c2; } do { __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w); __m128i vacc1x0123 = vacc0x0123; __m128i vacc2x0123 = vacc0x0123; __m128i vacc3x0123 = vacc0x0123; w = (const void*) ((const int32_t*) w + 4); size_t k = kc; while (k >= 8 * sizeof(int8_t)) { const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0); const __m128i vxa0 = _mm_cvtepi8_epi16(va0); a0 += 8; const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1); const __m128i vxa1 = _mm_cvtepi8_epi16(va1); a1 += 8; const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2); const __m128i vxa2 = _mm_cvtepi8_epi16(va2); a2 += 8; const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3); const __m128i vxa3 = _mm_cvtepi8_epi16(va3); a3 += 8; const __m128i vb01 = _mm_loadu_si128((const __m128i*) w); const __m128i vxb0 = _mm_cvtepi8_epi16(vb01); const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16)); const __m128i vxb2 = _mm_cvtepi8_epi16(vb23); const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc1x0123 
= _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(3, 3, 3, 3)), vxb3)); w = (const void*) ((const int8_t*) w + 32); k -= 8 * sizeof(int8_t); } if (k != 0) { const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0); const __m128i vxa0 = _mm_cvtepi8_epi16(va0); a0 = (const int8_t*) ((uintptr_t) a0 + k); const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1); const __m128i vxa1 = _mm_cvtepi8_epi16(va1); a1 = (const int8_t*) ((uintptr_t) a1 + k); const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2); const __m128i vxa2 = _mm_cvtepi8_epi16(va2); a2 = (const int8_t*) ((uintptr_t) a2 + k); const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3); const __m128i vxa3 = _mm_cvtepi8_epi16(va3); a3 = (const int8_t*) ((uintptr_t) a3 + k); const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb0 = _mm_cvtepi8_epi16(vb0); w = (const void*) ((const int8_t*) w + 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); if (k > 2 * sizeof(int8_t)) { const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb1 = _mm_cvtepi8_epi16(vb1); w = (const void*) ((const int8_t*) w + 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); if (k > 4 * sizeof(int8_t)) { const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb2 = _mm_cvtepi8_epi16(vb2); w = (const void*) ((const int8_t*) w + 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); } } } __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123); __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123); __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123); __m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123); const __m128 vscale0123 = _mm_loadu_ps((const float*) w); w 
= (const void*) ((const float*) w + 4); vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123); vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123); vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale0123); vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale0123); const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point); vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point); vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point); vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point); vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point); vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123); vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123); vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123); vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123); const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point); __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point); __m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point); __m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123); vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min)); if (nc >= 4) { unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout)); unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1)); unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2)); unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3)); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); a3 = (const int8_t*) ((uintptr_t) a3 - kc); nc -= 4; } else { if (nc & 2) { unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0)); c0 += 2; unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2)); c1 += 2; unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4)); c2 += 2; unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6)); c3 += 2; vout = _mm_srli_epi32(vout, 16); } if (nc & 1) { *c0 = (int8_t) _mm_extract_epi8(vout, 0); *c1 = (int8_t) _mm_extract_epi8(vout, 4); *c2 = (int8_t) _mm_extract_epi8(vout, 8); *c3 = (int8_t) _mm_extract_epi8(vout, 12); } nc = 0; } } while (nc != 0); }
10,833
40.509579
108
c
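
Editor's note: the c2 layout in the record above pairs channels so one _mm_madd_epi16 accumulates a 2-element dot product per int32 lane: a pair of int16 activations is broadcast with _mm_shuffle_epi32 and multiplied against four weight pairs at once. The ld128 flavor pulls 16 weight bytes per load, sign-extending the low half with _mm_cvtepi8_epi16 and the high half with an unpack/shift idiom. A small sketch of one such inner step; the function and variable names are illustrative.

#include <smmintrin.h>  // SSE4.1 intrinsics, as used by the kernel above

// One c2 multiply-accumulate step: vxa holds 8 sign-extended int16
// activations; b16 holds 16 int8 weights covering two channel-pairs.
static __m128i madd_c2_step(__m128i acc, __m128i vxa, __m128i b16) {
  const __m128i vxb0 = _mm_cvtepi8_epi16(b16);                         // low 8 bytes
  const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(b16, b16), 8); // high 8 bytes
  acc = _mm_add_epi32(acc, _mm_madd_epi16(
      _mm_shuffle_epi32(vxa, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));  // activation pair 0
  acc = _mm_add_epi32(acc, _mm_madd_epi16(
      _mm_shuffle_epi32(vxa, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));  // activation pair 1
  return acc;
}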
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x4c2-minmax-fp32-avx-ld64.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/MRx4c2-sse.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <smmintrin.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> #include <xnnpack/unaligned.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x4c2__avx_ld64( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 2 * sizeof(int8_t)); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride); int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { a3 = a2; c3 = c2; } do { __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w); __m128i vacc1x0123 = vacc0x0123; __m128i vacc2x0123 = vacc0x0123; __m128i vacc3x0123 = vacc0x0123; w = (const void*) ((const int32_t*) w + 4); size_t k = kc; while (k >= 8 * sizeof(int8_t)) { const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0); const __m128i vxa0 = _mm_cvtepi8_epi16(va0); a0 += 8; const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1); const __m128i vxa1 = _mm_cvtepi8_epi16(va1); a1 += 8; const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2); const __m128i vxa2 = _mm_cvtepi8_epi16(va2); a2 += 8; const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3); const __m128i vxa3 = _mm_cvtepi8_epi16(va3); a3 += 8; const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb0 = _mm_cvtepi8_epi16(vb0); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8)); const __m128i vxb1 = _mm_cvtepi8_epi16(vb1); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16)); const __m128i vxb2 = _mm_cvtepi8_epi16(vb2); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc1x0123 = 
_mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24)); const __m128i vxb3 = _mm_cvtepi8_epi16(vb3); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(3, 3, 3, 3)), vxb3)); w = (const void*) ((const int8_t*) w + 32); k -= 8 * sizeof(int8_t); } if (k != 0) { const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0); const __m128i vxa0 = _mm_cvtepi8_epi16(va0); a0 = (const int8_t*) ((uintptr_t) a0 + k); const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1); const __m128i vxa1 = _mm_cvtepi8_epi16(va1); a1 = (const int8_t*) ((uintptr_t) a1 + k); const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2); const __m128i vxa2 = _mm_cvtepi8_epi16(va2); a2 = (const int8_t*) ((uintptr_t) a2 + k); const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3); const __m128i vxa3 = _mm_cvtepi8_epi16(va3); a3 = (const int8_t*) ((uintptr_t) a3 + k); const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb0 = _mm_cvtepi8_epi16(vb0); w = (const void*) ((const int8_t*) w + 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); if (k > 2 * sizeof(int8_t)) { const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb1 = _mm_cvtepi8_epi16(vb1); w = (const void*) ((const int8_t*) w + 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); if (k > 4 * sizeof(int8_t)) { const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb2 = _mm_cvtepi8_epi16(vb2); w = (const void*) ((const int8_t*) w + 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); } } } __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123); __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123); __m128 vscaled2x0123 = 
_mm_cvtepi32_ps(vacc2x0123); __m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123); const __m128 vscale0123 = _mm_loadu_ps((const float*) w); w = (const void*) ((const float*) w + 4); vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123); vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123); vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale0123); vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale0123); const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point); vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point); vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point); vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point); vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point); vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123); vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123); vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123); vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123); const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point); __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point); __m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point); __m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123); vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min)); if (nc >= 4) { unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout)); unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1)); unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2)); unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3)); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); a3 = (const int8_t*) ((uintptr_t) a3 - kc); nc -= 4; } else { if (nc & 2) { unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0)); c0 += 2; unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2)); c1 += 2; unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4)); c2 += 2; unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6)); c3 += 2; vout = _mm_srli_epi32(vout, 16); } if (nc & 1) { *c0 = (int8_t) _mm_extract_epi8(vout, 0); *c1 = (int8_t) _mm_extract_epi8(vout, 4); *c2 = (int8_t) _mm_extract_epi8(vout, 8); *c3 = (int8_t) _mm_extract_epi8(vout, 12); } nc = 0; } } while (nc != 0); }
10,947
40.627376
108
c
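
Editor's note: the ld64 record above differs from ld128 only in weight fetching: each 8-byte channel-pair comes from its own _mm_loadl_epi64 plus _mm_cvtepi8_epi16, trading one wide load for four narrow ones. Both share the same remainder handling: kc is rounded up to a multiple of 2, so after the unrolled k >= 8 loop the tail processes k = 2, 4, or 6 leftover bytes with progressively nested steps. A scalar sketch of that control flow; the helper name is mine.

#include <stddef.h>
#include <stdint.h>

// Shape of the remainder handling in the kernels above: 2/4/6
// leftover bytes take 1/2/3 pair-steps respectively.
static size_t tail_pair_steps(size_t k) {
  size_t steps = 0;
  if (k != 0) {
    steps = 1;                          // pair 0 always processed
    if (k > 2 * sizeof(int8_t)) {
      steps = 2;                        // pair 1
      if (k > 4 * sizeof(int8_t)) {
        steps = 3;                      // pair 2
      }
    }
  }
  return steps;
}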
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x4c2-minmax-fp32-sse2-ld128.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/MRx4c2-sse.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <emmintrin.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> #include <xnnpack/unaligned.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x4c2__sse2_ld128( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 2 * sizeof(int8_t)); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride); int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { a3 = a2; c3 = c2; } do { __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w); __m128i vacc1x0123 = vacc0x0123; __m128i vacc2x0123 = vacc0x0123; __m128i vacc3x0123 = vacc0x0123; w = (const void*) ((const int32_t*) w + 4); size_t k = kc; while (k >= 8 * sizeof(int8_t)) { const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0); const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8); a0 += 8; const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1); const __m128i vxa1 = _mm_srai_epi16(_mm_unpacklo_epi8(va1, va1), 8); a1 += 8; const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2); const __m128i vxa2 = _mm_srai_epi16(_mm_unpacklo_epi8(va2, va2), 8); a2 += 8; const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3); const __m128i vxa3 = _mm_srai_epi16(_mm_unpacklo_epi8(va3, va3), 8); a3 += 8; const __m128i vb01 = _mm_loadu_si128((const __m128i*) w); const __m128i vsb01 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb01); const __m128i vxb0 = _mm_unpacklo_epi8(vb01, vsb01); const __m128i vxb1 = _mm_unpackhi_epi8(vb01, vsb01); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16)); const __m128i vsb23 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb23); const __m128i vxb2 = 
_mm_unpacklo_epi8(vb23, vsb23); const __m128i vxb3 = _mm_unpackhi_epi8(vb23, vsb23); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(3, 3, 3, 3)), vxb3)); w = (const void*) ((const int8_t*) w + 32); k -= 8 * sizeof(int8_t); } if (k != 0) { const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0); const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8); a0 = (const int8_t*) ((uintptr_t) a0 + k); const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1); const __m128i vxa1 = _mm_srai_epi16(_mm_unpacklo_epi8(va1, va1), 8); a1 = (const int8_t*) ((uintptr_t) a1 + k); const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2); const __m128i vxa2 = _mm_srai_epi16(_mm_unpacklo_epi8(va2, va2), 8); a2 = (const int8_t*) ((uintptr_t) a2 + k); const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3); const __m128i vxa3 = _mm_srai_epi16(_mm_unpacklo_epi8(va3, va3), 8); a3 = (const int8_t*) ((uintptr_t) a3 + k); const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb0 = _mm_srai_epi16(_mm_unpacklo_epi8(vb0, vb0), 8); w = (const void*) ((const int8_t*) w + 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); if (k > 2 * sizeof(int8_t)) { const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb1 = _mm_srai_epi16(_mm_unpacklo_epi8(vb1, vb1), 8); w = (const void*) ((const int8_t*) w + 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); if (k > 4 * sizeof(int8_t)) { const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb2 = _mm_srai_epi16(_mm_unpacklo_epi8(vb2, vb2), 8); w = (const void*) ((const int8_t*) w + 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); 
vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); } } } __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123); __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123); __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123); __m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123); const __m128 vscale0123 = _mm_loadu_ps((const float*) w); w = (const void*) ((const float*) w + 4); vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123); vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123); vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale0123); vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale0123); const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point); vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point); vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point); vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point); vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point); vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123); vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123); vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123); vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123); const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point); __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point); __m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point); const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min); vacc01x0123 = _mm_max_epi16(vacc01x0123, voutput_min); vacc23x0123 = _mm_max_epi16(vacc23x0123, voutput_min); __m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123); if (nc >= 4) { unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout)); vout = _mm_shuffle_epi32(vout, _MM_SHUFFLE(0, 3, 2, 1)); unaligned_store_u32(c1, (uint32_t) _mm_cvtsi128_si32(vout)); vout = _mm_shuffle_epi32(vout, _MM_SHUFFLE(0, 3, 2, 1)); unaligned_store_u32(c2, (uint32_t) _mm_cvtsi128_si32(vout)); vout = _mm_shuffle_epi32(vout, _MM_SHUFFLE(0, 3, 2, 1)); unaligned_store_u32(c3, (uint32_t) _mm_cvtsi128_si32(vout)); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); a3 = (const int8_t*) ((uintptr_t) a3 - kc); nc -= 4; } else { if (nc & 2) { unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0)); c0 += 2; unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2)); c1 += 2; unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4)); c2 += 2; unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6)); c3 += 2; vout = _mm_srli_epi32(vout, 16); } if (nc & 1) { *c0 = (int8_t) _mm_cvtsi128_si32(vout); *c1 = (int8_t) _mm_extract_epi16(vout, 2); *c2 = (int8_t) _mm_extract_epi16(vout, 4); *c3 = (int8_t) _mm_extract_epi16(vout, 6); } nc = 0; } } while (nc != 0); }
11,517
41.977612
108
c
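
Editor's note: SSE2 has no _mm_cvtepi8_epi16, so the record above widens int8 weights by computing a sign mask with _mm_cmpgt_epi8 against zero and interleaving it behind the value bytes, while activations use the cheaper duplicate-and-shift idiom. A minimal sketch of both widening tricks; the function names are illustrative.

#include <emmintrin.h>  // SSE2 only, matching the kernel above

// Widen the low 8 int8 lanes to int16 via a computed sign mask:
// bytes >= 0 interleave with 0x00, negative bytes with 0xFF.
static __m128i widen_lo_cmp(__m128i v) {
  const __m128i sign = _mm_cmpgt_epi8(_mm_setzero_si128(), v);
  return _mm_unpacklo_epi8(v, sign);
}

// Equivalent widening via duplicate-and-shift: each int16 lane becomes
// (b << 8) | b, and an arithmetic shift right by 8 sign-extends b.
static __m128i widen_lo_shift(__m128i v) {
  return _mm_srai_epi16(_mm_unpacklo_epi8(v, v), 8);
}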
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x4c2-minmax-fp32-sse2-ld64.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/MRx4c2-sse.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <emmintrin.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> #include <xnnpack/unaligned.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x4c2__sse2_ld64( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 2 * sizeof(int8_t)); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride); int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { a3 = a2; c3 = c2; } do { __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w); __m128i vacc1x0123 = vacc0x0123; __m128i vacc2x0123 = vacc0x0123; __m128i vacc3x0123 = vacc0x0123; w = (const void*) ((const int32_t*) w + 4); size_t k = kc; while (k >= 8 * sizeof(int8_t)) { const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0); const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8); a0 += 8; const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1); const __m128i vxa1 = _mm_srai_epi16(_mm_unpacklo_epi8(va1, va1), 8); a1 += 8; const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2); const __m128i vxa2 = _mm_srai_epi16(_mm_unpacklo_epi8(va2, va2), 8); a2 += 8; const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3); const __m128i vxa3 = _mm_srai_epi16(_mm_unpacklo_epi8(va3, va3), 8); a3 += 8; const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb0 = _mm_srai_epi16(_mm_unpacklo_epi8(vb0, vb0), 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8)); const __m128i vxb1 = _mm_srai_epi16(_mm_unpacklo_epi8(vb1, vb1), 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16)); const __m128i vxb2 = 
_mm_srai_epi16(_mm_unpacklo_epi8(vb2, vb2), 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24)); const __m128i vxb3 = _mm_srai_epi16(_mm_unpacklo_epi8(vb3, vb3), 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(3, 3, 3, 3)), vxb3)); w = (const void*) ((const int8_t*) w + 32); k -= 8 * sizeof(int8_t); } if (k != 0) { const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0); const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8); a0 = (const int8_t*) ((uintptr_t) a0 + k); const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1); const __m128i vxa1 = _mm_srai_epi16(_mm_unpacklo_epi8(va1, va1), 8); a1 = (const int8_t*) ((uintptr_t) a1 + k); const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2); const __m128i vxa2 = _mm_srai_epi16(_mm_unpacklo_epi8(va2, va2), 8); a2 = (const int8_t*) ((uintptr_t) a2 + k); const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3); const __m128i vxa3 = _mm_srai_epi16(_mm_unpacklo_epi8(va3, va3), 8); a3 = (const int8_t*) ((uintptr_t) a3 + k); const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb0 = _mm_srai_epi16(_mm_unpacklo_epi8(vb0, vb0), 8); w = (const void*) ((const int8_t*) w + 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); if (k > 2 * sizeof(int8_t)) { const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb1 = _mm_srai_epi16(_mm_unpacklo_epi8(vb1, vb1), 8); w = (const void*) ((const int8_t*) w + 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); if (k > 4 * sizeof(int8_t)) { const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb2 = _mm_srai_epi16(_mm_unpacklo_epi8(vb2, vb2), 8); w = (const void*) ((const int8_t*) w + 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); 
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); } } } __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123); __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123); __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123); __m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123); const __m128 vscale0123 = _mm_loadu_ps((const float*) w); w = (const void*) ((const float*) w + 4); vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123); vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123); vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale0123); vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale0123); const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point); vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point); vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point); vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point); vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point); vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123); vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123); vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123); vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123); const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point); __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point); __m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point); const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min); vacc01x0123 = _mm_max_epi16(vacc01x0123, voutput_min); vacc23x0123 = _mm_max_epi16(vacc23x0123, voutput_min); __m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123); if (nc >= 4) { unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout)); vout = _mm_shuffle_epi32(vout, _MM_SHUFFLE(0, 3, 2, 1)); unaligned_store_u32(c1, (uint32_t) _mm_cvtsi128_si32(vout)); vout = _mm_shuffle_epi32(vout, _MM_SHUFFLE(0, 3, 2, 1)); unaligned_store_u32(c2, (uint32_t) _mm_cvtsi128_si32(vout)); vout = _mm_shuffle_epi32(vout, _MM_SHUFFLE(0, 3, 2, 1)); unaligned_store_u32(c3, (uint32_t) _mm_cvtsi128_si32(vout)); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); a3 = (const int8_t*) ((uintptr_t) a3 - kc); nc -= 4; } else { if (nc & 2) { unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0)); c0 += 2; unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2)); c1 += 2; unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4)); c2 += 2; unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6)); c3 += 2; vout = _mm_srli_epi32(vout, 16); } if (nc & 1) { *c0 = (int8_t) _mm_cvtsi128_si32(vout); *c1 = (int8_t) _mm_extract_epi16(vout, 2); *c2 = (int8_t) _mm_extract_epi16(vout, 4); *c3 = (int8_t) _mm_extract_epi16(vout, 6); } nc = 0; } } while (nc != 0); }
11,607
42.313433
108
c
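
Editor's note: the SSE2 output stage above also differs from the SSE4.1 one. _mm_max_epi8 is unavailable, so the min clamp is applied with _mm_max_epi16 while the values are still 16-bit, before _mm_packs_epi16 narrows them (the max clamp was already enforced in float). A sketch of that narrowing sequence over four rounded int32 row accumulators; the function and parameter names are mine.

#include <emmintrin.h>

// Narrow four int32x4 row accumulators (already scaled and rounded)
// to sixteen int8 outputs, SSE2-style, as in the kernel above.
static __m128i narrow_output_sse2(__m128i acc0, __m128i acc1,
                                  __m128i acc2, __m128i acc3,
                                  __m128i zero_point_epi16,
                                  __m128i output_min_epi16) {
  // int32 -> int16 with saturation, then add the output zero point.
  __m128i v01 = _mm_adds_epi16(_mm_packs_epi32(acc0, acc1), zero_point_epi16);
  __m128i v23 = _mm_adds_epi16(_mm_packs_epi32(acc2, acc3), zero_point_epi16);
  // SSE2 has no _mm_max_epi8: apply the min clamp at 16 bits instead.
  v01 = _mm_max_epi16(v01, output_min_epi16);
  v23 = _mm_max_epi16(v23, output_min_epi16);
  return _mm_packs_epi16(v01, v23);  // saturating int16 -> int8
}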
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x4c2-minmax-fp32-sse41-ld128.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/MRx4c2-sse.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <smmintrin.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> #include <xnnpack/unaligned.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x4c2__sse41_ld128( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 2 * sizeof(int8_t)); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride); int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { a3 = a2; c3 = c2; } do { __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w); __m128i vacc1x0123 = vacc0x0123; __m128i vacc2x0123 = vacc0x0123; __m128i vacc3x0123 = vacc0x0123; w = (const void*) ((const int32_t*) w + 4); size_t k = kc; while (k >= 8 * sizeof(int8_t)) { const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0); const __m128i vxa0 = _mm_cvtepi8_epi16(va0); a0 += 8; const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1); const __m128i vxa1 = _mm_cvtepi8_epi16(va1); a1 += 8; const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2); const __m128i vxa2 = _mm_cvtepi8_epi16(va2); a2 += 8; const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3); const __m128i vxa3 = _mm_cvtepi8_epi16(va3); a3 += 8; const __m128i vb01 = _mm_loadu_si128((const __m128i*) w); const __m128i vxb0 = _mm_cvtepi8_epi16(vb01); const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16)); const __m128i vxb2 = _mm_cvtepi8_epi16(vb23); const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); 
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(3, 3, 3, 3)), vxb3)); w = (const void*) ((const int8_t*) w + 32); k -= 8 * sizeof(int8_t); } if (k != 0) { const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0); const __m128i vxa0 = _mm_cvtepi8_epi16(va0); a0 = (const int8_t*) ((uintptr_t) a0 + k); const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1); const __m128i vxa1 = _mm_cvtepi8_epi16(va1); a1 = (const int8_t*) ((uintptr_t) a1 + k); const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2); const __m128i vxa2 = _mm_cvtepi8_epi16(va2); a2 = (const int8_t*) ((uintptr_t) a2 + k); const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3); const __m128i vxa3 = _mm_cvtepi8_epi16(va3); a3 = (const int8_t*) ((uintptr_t) a3 + k); const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb0 = _mm_cvtepi8_epi16(vb0); w = (const void*) ((const int8_t*) w + 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); if (k > 2 * sizeof(int8_t)) { const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb1 = _mm_cvtepi8_epi16(vb1); w = (const void*) ((const int8_t*) w + 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); if (k > 4 * sizeof(int8_t)) { const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb2 = _mm_cvtepi8_epi16(vb2); w = (const void*) ((const int8_t*) w + 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); } } } __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123); __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123); __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123); __m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123); const __m128 vscale0123 = _mm_loadu_ps((const 
float*) w); w = (const void*) ((const float*) w + 4); vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123); vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123); vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale0123); vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale0123); const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point); vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point); vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point); vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point); vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point); vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123); vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123); vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123); vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123); const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point); __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point); __m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point); __m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123); vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min)); if (nc >= 4) { unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout)); unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1)); unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2)); unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3)); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); a3 = (const int8_t*) ((uintptr_t) a3 - kc); nc -= 4; } else { if (nc & 2) { unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0)); c0 += 2; unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2)); c1 += 2; unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4)); c2 += 2; unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6)); c3 += 2; vout = _mm_srli_epi32(vout, 16); } if (nc & 1) { *c0 = (int8_t) _mm_extract_epi8(vout, 0); *c1 = (int8_t) _mm_extract_epi8(vout, 4); *c2 = (int8_t) _mm_extract_epi8(vout, 8); *c3 = (int8_t) _mm_extract_epi8(vout, 12); } nc = 0; } } while (nc != 0); }
10,835
40.517241
108
c
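The pointer arithmetic in the 4x4c2 kernel above implies a packed-weight layout of, per 4-column block: 4 int32 biases (loaded into the initial accumulators), then round_up_po2(kc, 2) * 4 int8 weights, then 4 float per-channel scales. A minimal sketch of that size computation, assuming this reading of the kernel; packed_block_size is a hypothetical helper for illustration, not an XNNPACK API:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper: bytes consumed from `w` per 4-column block,
   as implied by the kernel's pointer increments. */
static size_t packed_block_size(size_t kc) {
  const size_t kc_rounded = (kc + 1) & ~(size_t) 1;  /* round_up_po2(kc, 2) */
  return 4 * sizeof(int32_t)              /* bias, loaded into vacc0x0123 */
       + kc_rounded * 4 * sizeof(int8_t)  /* weights: 4 columns per k step */
       + 4 * sizeof(float);               /* per-channel scales */
}

int main(void) {
  printf("kc=3: %zu bytes per 4-column block\n", packed_block_size(3));
  printf("kc=8: %zu bytes per 4-column block\n", packed_block_size(8));
  return 0;
}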
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x4c2-minmax-fp32-sse41-ld64.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/MRx4c2-sse.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <smmintrin.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> #include <xnnpack/unaligned.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x4c2__sse41_ld64( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 2 * sizeof(int8_t)); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride); int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { a3 = a2; c3 = c2; } do { __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w); __m128i vacc1x0123 = vacc0x0123; __m128i vacc2x0123 = vacc0x0123; __m128i vacc3x0123 = vacc0x0123; w = (const void*) ((const int32_t*) w + 4); size_t k = kc; while (k >= 8 * sizeof(int8_t)) { const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0); const __m128i vxa0 = _mm_cvtepi8_epi16(va0); a0 += 8; const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1); const __m128i vxa1 = _mm_cvtepi8_epi16(va1); a1 += 8; const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2); const __m128i vxa2 = _mm_cvtepi8_epi16(va2); a2 += 8; const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3); const __m128i vxa3 = _mm_cvtepi8_epi16(va3); a3 += 8; const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb0 = _mm_cvtepi8_epi16(vb0); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8)); const __m128i vxb1 = _mm_cvtepi8_epi16(vb1); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16)); const __m128i vxb2 = _mm_cvtepi8_epi16(vb2); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc1x0123 = 
_mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24)); const __m128i vxb3 = _mm_cvtepi8_epi16(vb3); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(3, 3, 3, 3)), vxb3)); w = (const void*) ((const int8_t*) w + 32); k -= 8 * sizeof(int8_t); } if (k != 0) { const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0); const __m128i vxa0 = _mm_cvtepi8_epi16(va0); a0 = (const int8_t*) ((uintptr_t) a0 + k); const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1); const __m128i vxa1 = _mm_cvtepi8_epi16(va1); a1 = (const int8_t*) ((uintptr_t) a1 + k); const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2); const __m128i vxa2 = _mm_cvtepi8_epi16(va2); a2 = (const int8_t*) ((uintptr_t) a2 + k); const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3); const __m128i vxa3 = _mm_cvtepi8_epi16(va3); a3 = (const int8_t*) ((uintptr_t) a3 + k); const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb0 = _mm_cvtepi8_epi16(vb0); w = (const void*) ((const int8_t*) w + 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); if (k > 2 * sizeof(int8_t)) { const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb1 = _mm_cvtepi8_epi16(vb1); w = (const void*) ((const int8_t*) w + 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); if (k > 4 * sizeof(int8_t)) { const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb2 = _mm_cvtepi8_epi16(vb2); w = (const void*) ((const int8_t*) w + 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); } } } __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123); __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123); __m128 vscaled2x0123 = 
_mm_cvtepi32_ps(vacc2x0123); __m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123); const __m128 vscale0123 = _mm_loadu_ps((const float*) w); w = (const void*) ((const float*) w + 4); vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123); vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123); vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale0123); vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale0123); const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point); vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point); vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point); vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point); vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point); vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123); vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123); vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123); vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123); const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point); __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point); __m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point); __m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123); vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min)); if (nc >= 4) { unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout)); unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1)); unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2)); unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3)); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); a3 = (const int8_t*) ((uintptr_t) a3 - kc); nc -= 4; } else { if (nc & 2) { unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0)); c0 += 2; unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2)); c1 += 2; unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4)); c2 += 2; unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6)); c3 += 2; vout = _mm_srli_epi32(vout, 16); } if (nc & 1) { *c0 = (int8_t) _mm_extract_epi8(vout, 0); *c1 = (int8_t) _mm_extract_epi8(vout, 4); *c2 = (int8_t) _mm_extract_epi8(vout, 8); *c3 = (int8_t) _mm_extract_epi8(vout, 12); } nc = 0; } } while (nc != 0); }
10,949
40.634981
108
c
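The ld64 variant above differs from the ld128 variant only in how packed weights are fetched: ld128 pulls 16 weight bytes with one _mm_loadu_si128 and sign-extends the two halves (_mm_cvtepi8_epi16 for the low half, the unpackhi-plus-arithmetic-shift idiom for the high half), while ld64 issues separate 8-byte _mm_loadl_epi64 loads and extends each directly; the arithmetic is identical. A standalone SSE4.1 sketch checking that the two fetch schemes produce the same int16 vectors:

#include <smmintrin.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void) {
  int8_t w[16];
  for (int i = 0; i < 16; i++) w[i] = (int8_t) (i * 15 - 100);
  /* ld128 style: one 16-byte load, then split and sign-extend. */
  const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
  const __m128i vxb0_ld128 = _mm_cvtepi8_epi16(vb01);
  const __m128i vxb1_ld128 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);
  /* ld64 style: two 8-byte loads, each sign-extended directly. */
  const __m128i vxb0_ld64 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) w));
  const __m128i vxb1_ld64 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (w + 8)));
  int16_t r[4][8];
  _mm_storeu_si128((__m128i*) r[0], vxb0_ld128);
  _mm_storeu_si128((__m128i*) r[1], vxb0_ld64);
  _mm_storeu_si128((__m128i*) r[2], vxb1_ld128);
  _mm_storeu_si128((__m128i*) r[3], vxb1_ld64);
  printf("%s\n", (!memcmp(r[0], r[1], 16) && !memcmp(r[2], r[3], 16)) ? "equal" : "differ");
  return 0;
}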
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x4c2-minmax-fp32-wasmsimd-dot16x2-ld128.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/MRx4c2-wasmsimd-dot16x2.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x4c2__wasmsimd_dot16x2_ld128( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 2 * sizeof(int8_t)); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride); int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { a3 = a2; c3 = c2; } do { v128_t vacc0x0123 = wasm_v128_load(w); v128_t vacc1x0123 = vacc0x0123; v128_t vacc2x0123 = vacc0x0123; v128_t vacc3x0123 = vacc0x0123; w = (const void*) ((const int32_t*) w + 4); size_t k = kc; while (k >= 8 * sizeof(int8_t)) { const v128_t vxa0 = wasm_i16x8_load8x8((const v128_t*) a0); a0 += 8; const v128_t vxa1 = wasm_i16x8_load8x8((const v128_t*) a1); a1 += 8; const v128_t vxa2 = wasm_i16x8_load8x8((const v128_t*) a2); a2 += 8; const v128_t vxa3 = wasm_i16x8_load8x8((const v128_t*) a3); a3 += 8; const v128_t vb01 = wasm_v128_load(w); const v128_t vxb0 = wasm_i16x8_extend_low_i8x16(vb01); const v128_t vxb1 = wasm_i16x8_extend_high_i8x16(vb01); vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0)); vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 0, 0, 0, 0), vxb0)); vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 0, 0, 0, 0), vxb0)); vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 0, 0, 0, 0), vxb0)); vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1)); vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 1, 1, 1, 1), vxb1)); vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 1, 1, 1, 1), vxb1)); vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 1, 1, 1, 1), vxb1)); const v128_t vb23 = wasm_v128_load((const int8_t*) w + 16); const v128_t vxb2 = wasm_i16x8_extend_low_i8x16(vb23); const v128_t vxb3 = wasm_i16x8_extend_high_i8x16(vb23); vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2)); vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 2, 2, 2, 2), vxb2)); vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 2, 2, 2, 2), vxb2)); vacc3x0123 = 
wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 2, 2, 2, 2), vxb2)); vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 3, 3, 3, 3), vxb3)); vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 3, 3, 3, 3), vxb3)); vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 3, 3, 3, 3), vxb3)); vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 3, 3, 3, 3), vxb3)); w = (const void*) ((const int8_t*) w + 32); k -= 8 * sizeof(int8_t); } if (k != 0) { const v128_t vxa0 = wasm_i16x8_load8x8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k); const v128_t vxa1 = wasm_i16x8_load8x8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k); const v128_t vxa2 = wasm_i16x8_load8x8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k); const v128_t vxa3 = wasm_i16x8_load8x8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k); const v128_t vxb0 = wasm_i16x8_load8x8(w); w = (const void*) ((const int8_t*) w + 8); vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0)); vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 0, 0, 0, 0), vxb0)); vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 0, 0, 0, 0), vxb0)); vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 0, 0, 0, 0), vxb0)); if (k > 2 * sizeof(int8_t)) { const v128_t vxb1 = wasm_i16x8_load8x8(w); w = (const void*) ((const int8_t*) w + 8); vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1)); vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 1, 1, 1, 1), vxb1)); vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 1, 1, 1, 1), vxb1)); vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 1, 1, 1, 1), vxb1)); if (k > 4 * sizeof(int8_t)) { const v128_t vxb2 = wasm_i16x8_load8x8(w); w = (const void*) ((const int8_t*) w + 8); vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2)); vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 2, 2, 2, 2), vxb2)); vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 2, 2, 2, 2), vxb2)); vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 2, 2, 2, 2), vxb2)); } } } vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123); vacc1x0123 = wasm_f32x4_convert_i32x4(vacc1x0123); vacc2x0123 = wasm_f32x4_convert_i32x4(vacc2x0123); vacc3x0123 = wasm_f32x4_convert_i32x4(vacc3x0123); const v128_t vscale0123 = wasm_v128_load(w); w = (const void*) ((const float*) w + 4); vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale0123); vacc1x0123 = wasm_f32x4_mul(vacc1x0123, vscale0123); vacc2x0123 = wasm_f32x4_mul(vacc2x0123, vscale0123); vacc3x0123 = wasm_f32x4_mul(vacc3x0123, vscale0123); const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias); vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias); vacc1x0123 = wasm_f32x4_add(vacc1x0123, vmagic_bias); vacc2x0123 = wasm_f32x4_add(vacc2x0123, vmagic_bias); vacc3x0123 = wasm_f32x4_add(vacc3x0123, vmagic_bias); const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min); 
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min); vacc1x0123 = wasm_i32x4_max(vacc1x0123, vmagic_min); vacc2x0123 = wasm_i32x4_max(vacc2x0123, vmagic_min); vacc3x0123 = wasm_i32x4_max(vacc3x0123, vmagic_min); const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point); vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point); vacc1x0123 = wasm_i32x4_sub(vacc1x0123, vmagic_bias_less_output_zero_point); vacc2x0123 = wasm_i32x4_sub(vacc2x0123, vmagic_bias_less_output_zero_point); vacc3x0123 = wasm_i32x4_sub(vacc3x0123, vmagic_bias_less_output_zero_point); v128_t vacc01x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc1x0123); v128_t vacc23x0123 = wasm_i16x8_narrow_i32x4(vacc2x0123, vacc3x0123); v128_t vout = wasm_i8x16_narrow_i16x8(vacc01x0123, vacc23x0123); const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max); vout = wasm_i8x16_min(vout, voutput_max); if (nc >= 4) { wasm_v128_store32_lane(c0, vout, 0); wasm_v128_store32_lane(c1, vout, 1); wasm_v128_store32_lane(c2, vout, 2); wasm_v128_store32_lane(c3, vout, 3); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); a3 = (const int8_t*) ((uintptr_t) a3 - kc); nc -= 4; } else { if (nc & 2) { wasm_v128_store16_lane(c0, vout, 0); c0 += 2; wasm_v128_store16_lane(c1, vout, 2); c1 += 2; wasm_v128_store16_lane(c2, vout, 4); c2 += 2; wasm_v128_store16_lane(c3, vout, 6); c3 += 2; vout = wasm_u32x4_shr(vout, 16); } if (nc & 1) { wasm_v128_store8_lane(c0, vout, 0); wasm_v128_store8_lane(c1, vout, 4); wasm_v128_store8_lane(c2, vout, 8); wasm_v128_store8_lane(c3, vout, 12); } nc = 0; } } while (nc != 0); }
10,237
38.992188
134
c
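The fp32_wasmsimd requantization in the kernel above rounds the scaled float accumulators with the magic-bias trick: adding a large power-of-two constant forces round-to-nearest-even into the float mantissa, after which an integer subtraction of the bias's bit pattern (folded here, together with the output zero point, into magic_bias_less_output_zero_point) recovers the rounded value; the wasm_i32x4_max against magic_min doubles as the lower output clamp, since the biased floats in this range order correctly as int32 bit patterns. A scalar sketch of the rounding step, assuming the usual 0x1.8p23 magic constant (function and variable names here are made up):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int32_t magic_round(float x) {
  const float magic_bias = 12582912.0f;  /* 0x1.8p23 = 2^23 + 2^22 */
  const float biased = x + magic_bias;   /* rounding (to nearest even) happens here */
  uint32_t bits;
  memcpy(&bits, &biased, sizeof(bits));
  /* Subtract the bias's bit pattern to recover the signed integer;
     the kernel folds the output zero point into this subtrahend. */
  return (int32_t) (bits - UINT32_C(0x4B400000));
}

int main(void) {
  printf("%d %d %d\n", magic_round(2.5f), magic_round(-3.7f), magic_round(100.49f));
  /* Expected: 2 -4 100 */
  return 0;
}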
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x4c2-minmax-fp32-wasmsimd-dot16x2-ld64.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/MRx4c2-wasmsimd-dot16x2.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x4c2__wasmsimd_dot16x2_ld64( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 2 * sizeof(int8_t)); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride); int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { a3 = a2; c3 = c2; } do { v128_t vacc0x0123 = wasm_v128_load(w); v128_t vacc1x0123 = vacc0x0123; v128_t vacc2x0123 = vacc0x0123; v128_t vacc3x0123 = vacc0x0123; w = (const void*) ((const int32_t*) w + 4); size_t k = kc; while (k >= 8 * sizeof(int8_t)) { const v128_t vxa0 = wasm_i16x8_load8x8((const v128_t*) a0); a0 += 8; const v128_t vxa1 = wasm_i16x8_load8x8((const v128_t*) a1); a1 += 8; const v128_t vxa2 = wasm_i16x8_load8x8((const v128_t*) a2); a2 += 8; const v128_t vxa3 = wasm_i16x8_load8x8((const v128_t*) a3); a3 += 8; const v128_t vxb0 = wasm_i16x8_load8x8(w); vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0)); vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 0, 0, 0, 0), vxb0)); vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 0, 0, 0, 0), vxb0)); vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 0, 0, 0, 0), vxb0)); const v128_t vxb1 = wasm_i16x8_load8x8((const int8_t*) w + 8); vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1)); vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 1, 1, 1, 1), vxb1)); vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 1, 1, 1, 1), vxb1)); vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 1, 1, 1, 1), vxb1)); const v128_t vxb2 = wasm_i16x8_load8x8((const int8_t*) w + 16); vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2)); vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 2, 2, 2, 2), vxb2)); vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 2, 2, 2, 2), vxb2)); vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 2, 2, 2, 2), vxb2)); const v128_t vxb3 = wasm_i16x8_load8x8((const int8_t*) w + 24); 
vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 3, 3, 3, 3), vxb3)); vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 3, 3, 3, 3), vxb3)); vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 3, 3, 3, 3), vxb3)); vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 3, 3, 3, 3), vxb3)); w = (const void*) ((const int8_t*) w + 32); k -= 8 * sizeof(int8_t); } if (k != 0) { const v128_t vxa0 = wasm_i16x8_load8x8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k); const v128_t vxa1 = wasm_i16x8_load8x8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k); const v128_t vxa2 = wasm_i16x8_load8x8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k); const v128_t vxa3 = wasm_i16x8_load8x8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k); const v128_t vxb0 = wasm_i16x8_load8x8(w); w = (const void*) ((const int8_t*) w + 8); vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0)); vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 0, 0, 0, 0), vxb0)); vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 0, 0, 0, 0), vxb0)); vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 0, 0, 0, 0), vxb0)); if (k > 2 * sizeof(int8_t)) { const v128_t vxb1 = wasm_i16x8_load8x8(w); w = (const void*) ((const int8_t*) w + 8); vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1)); vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 1, 1, 1, 1), vxb1)); vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 1, 1, 1, 1), vxb1)); vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 1, 1, 1, 1), vxb1)); if (k > 4 * sizeof(int8_t)) { const v128_t vxb2 = wasm_i16x8_load8x8(w); w = (const void*) ((const int8_t*) w + 8); vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2)); vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 2, 2, 2, 2), vxb2)); vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 2, 2, 2, 2), vxb2)); vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 2, 2, 2, 2), vxb2)); } } } vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123); vacc1x0123 = wasm_f32x4_convert_i32x4(vacc1x0123); vacc2x0123 = wasm_f32x4_convert_i32x4(vacc2x0123); vacc3x0123 = wasm_f32x4_convert_i32x4(vacc3x0123); const v128_t vscale0123 = wasm_v128_load(w); w = (const void*) ((const float*) w + 4); vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale0123); vacc1x0123 = wasm_f32x4_mul(vacc1x0123, vscale0123); vacc2x0123 = wasm_f32x4_mul(vacc2x0123, vscale0123); vacc3x0123 = wasm_f32x4_mul(vacc3x0123, vscale0123); const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias); vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias); vacc1x0123 = wasm_f32x4_add(vacc1x0123, vmagic_bias); vacc2x0123 = wasm_f32x4_add(vacc2x0123, vmagic_bias); vacc3x0123 = wasm_f32x4_add(vacc3x0123, vmagic_bias); const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min); vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min); vacc1x0123 = wasm_i32x4_max(vacc1x0123, 
vmagic_min); vacc2x0123 = wasm_i32x4_max(vacc2x0123, vmagic_min); vacc3x0123 = wasm_i32x4_max(vacc3x0123, vmagic_min); const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point); vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point); vacc1x0123 = wasm_i32x4_sub(vacc1x0123, vmagic_bias_less_output_zero_point); vacc2x0123 = wasm_i32x4_sub(vacc2x0123, vmagic_bias_less_output_zero_point); vacc3x0123 = wasm_i32x4_sub(vacc3x0123, vmagic_bias_less_output_zero_point); v128_t vacc01x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc1x0123); v128_t vacc23x0123 = wasm_i16x8_narrow_i32x4(vacc2x0123, vacc3x0123); v128_t vout = wasm_i8x16_narrow_i16x8(vacc01x0123, vacc23x0123); const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max); vout = wasm_i8x16_min(vout, voutput_max); if (nc >= 4) { wasm_v128_store32_lane(c0, vout, 0); wasm_v128_store32_lane(c1, vout, 1); wasm_v128_store32_lane(c2, vout, 2); wasm_v128_store32_lane(c3, vout, 3); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); a3 = (const int8_t*) ((uintptr_t) a3 - kc); nc -= 4; } else { if (nc & 2) { wasm_v128_store16_lane(c0, vout, 0); c0 += 2; wasm_v128_store16_lane(c1, vout, 2); c1 += 2; wasm_v128_store16_lane(c2, vout, 4); c2 += 2; wasm_v128_store16_lane(c3, vout, 6); c3 += 2; vout = wasm_u32x4_shr(vout, 16); } if (nc & 1) { wasm_v128_store8_lane(c0, vout, 0); wasm_v128_store8_lane(c1, vout, 4); wasm_v128_store8_lane(c2, vout, 8); wasm_v128_store8_lane(c3, vout, 12); } nc = 0; } } while (nc != 0); }
10,137
38.913386
134
c
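Both dot16x2 variants build on wasm_i32x4_dot_i16x8, which multiplies adjacent pairs of signed 16-bit lanes and sums each pair into a 32-bit lane (the same contraction as x86 pmaddwd). A scalar sketch of that semantics:

#include <stdint.h>
#include <stdio.h>

/* Scalar model of wasm_i32x4_dot_i16x8(a, b). */
static void dot_i16x8(const int16_t a[8], const int16_t b[8], int32_t out[4]) {
  for (int i = 0; i < 4; i++) {
    out[i] = (int32_t) a[2 * i] * b[2 * i] + (int32_t) a[2 * i + 1] * b[2 * i + 1];
  }
}

int main(void) {
  const int16_t a[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  const int16_t b[8] = {1, 1, 2, 2, 3, 3, 4, 4};
  int32_t out[4];
  dot_i16x8(a, b, out);
  printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]);  /* 3 14 33 60 */
  return 0;
}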
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x4c2-minmax-fp32-xop-ld128.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/MRx4c2-sse.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #ifdef _MSC_VER #include <intrin.h> #else #include <x86intrin.h> #endif #include <xnnpack/gemm.h> #include <xnnpack/math.h> #include <xnnpack/unaligned.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x4c2__xop_ld128( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 2 * sizeof(int8_t)); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride); int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { a3 = a2; c3 = c2; } do { __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w); __m128i vacc1x0123 = vacc0x0123; __m128i vacc2x0123 = vacc0x0123; __m128i vacc3x0123 = vacc0x0123; w = (const void*) ((const int32_t*) w + 4); size_t k = kc; while (k >= 8 * sizeof(int8_t)) { const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0); const __m128i vxa0 = _mm_cvtepi8_epi16(va0); a0 += 8; const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1); const __m128i vxa1 = _mm_cvtepi8_epi16(va1); a1 += 8; const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2); const __m128i vxa2 = _mm_cvtepi8_epi16(va2); a2 += 8; const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3); const __m128i vxa3 = _mm_cvtepi8_epi16(va3); a3 += 8; const __m128i vb01 = _mm_loadu_si128((const __m128i*) w); const __m128i vxb0 = _mm_cvtepi8_epi16(vb01); const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8); vacc0x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc0x0123); vacc1x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc1x0123); vacc2x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc2x0123); vacc3x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc3x0123); vacc0x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc0x0123); vacc1x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc1x0123); vacc2x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc2x0123); vacc3x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc3x0123); const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16)); const __m128i vxb2 = _mm_cvtepi8_epi16(vb23); const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8); vacc0x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc0x0123); vacc1x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), 
vxb2, vacc1x0123); vacc2x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc2x0123); vacc3x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc3x0123); vacc0x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc0x0123); vacc1x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc1x0123); vacc2x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc2x0123); vacc3x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc3x0123); w = (const void*) ((const int8_t*) w + 32); k -= 8 * sizeof(int8_t); } if (k != 0) { const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0); const __m128i vxa0 = _mm_cvtepi8_epi16(va0); a0 = (const int8_t*) ((uintptr_t) a0 + k); const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1); const __m128i vxa1 = _mm_cvtepi8_epi16(va1); a1 = (const int8_t*) ((uintptr_t) a1 + k); const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2); const __m128i vxa2 = _mm_cvtepi8_epi16(va2); a2 = (const int8_t*) ((uintptr_t) a2 + k); const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3); const __m128i vxa3 = _mm_cvtepi8_epi16(va3); a3 = (const int8_t*) ((uintptr_t) a3 + k); const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb0 = _mm_cvtepi8_epi16(vb0); w = (const void*) ((const int8_t*) w + 8); vacc0x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc0x0123); vacc1x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc1x0123); vacc2x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc2x0123); vacc3x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc3x0123); if (k > 2 * sizeof(int8_t)) { const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb1 = _mm_cvtepi8_epi16(vb1); w = (const void*) ((const int8_t*) w + 8); vacc0x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc0x0123); vacc1x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc1x0123); vacc2x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc2x0123); vacc3x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc3x0123); if (k > 4 * sizeof(int8_t)) { const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb2 = _mm_cvtepi8_epi16(vb2); w = (const void*) ((const int8_t*) w + 8); vacc0x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc0x0123); vacc1x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc1x0123); vacc2x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc2x0123); vacc3x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc3x0123); } } } __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123); __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123); __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123); __m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123); const __m128 vscale0123 = _mm_loadu_ps((const float*) w); w = (const void*) ((const float*) w + 4); vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123); vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123); vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale0123); vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale0123); const __m128 voutput_max_less_zero_point = 
_mm_load_ps(params->fp32_sse4.output_max_less_zero_point); vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point); vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point); vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point); vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point); vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123); vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123); vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123); vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123); const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point); __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point); __m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point); __m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123); vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min)); if (nc >= 4) { unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout)); unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1)); unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2)); unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3)); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); a3 = (const int8_t*) ((uintptr_t) a3 - kc); nc -= 4; } else { if (nc & 2) { unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0)); c0 += 2; unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2)); c1 += 2; unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4)); c2 += 2; unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6)); c3 += 2; vout = _mm_srli_epi32(vout, 16); } if (nc & 1) { *c0 = (int8_t) _mm_extract_epi8(vout, 0); *c1 = (int8_t) _mm_extract_epi8(vout, 4); *c2 = (int8_t) _mm_extract_epi8(vout, 8); *c3 = (int8_t) _mm_extract_epi8(vout, 12); } nc = 0; } } while (nc != 0); }
10,522
38.709434
108
c
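The xop variants are line-for-line the same as the sse41 ones except that each multiply-accumulate uses AMD XOP's fused _mm_maddd_epi16(a, b, acc) in place of the two-instruction _mm_add_epi32(acc, _mm_madd_epi16(a, b)) sequence. A minimal SSE2-only sketch of the expression the fused instruction computes (equivalent for in-range inputs, so it runs on non-XOP hardware):

#include <emmintrin.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  const int16_t a[8] = {1, 2, 3, 4, -5, 6, -7, 8};
  const int16_t b[8] = {10, 20, 30, 40, 50, -60, 70, -80};
  const __m128i va = _mm_loadu_si128((const __m128i*) a);
  const __m128i vb = _mm_loadu_si128((const __m128i*) b);
  __m128i acc = _mm_set1_epi32(1000);
  /* Non-XOP spelling of _mm_maddd_epi16(va, vb, acc):
     acc[i] += a[2i]*b[2i] + a[2i+1]*b[2i+1] per 32-bit lane. */
  acc = _mm_add_epi32(acc, _mm_madd_epi16(va, vb));
  int32_t out[4];
  _mm_storeu_si128((__m128i*) out, acc);
  printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]);  /* 1050 1250 390 -130 */
  return 0;
}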
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x4c2-minmax-fp32-xop-ld64.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/MRx4c2-sse.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #ifdef _MSC_VER #include <intrin.h> #else #include <x86intrin.h> #endif #include <xnnpack/gemm.h> #include <xnnpack/math.h> #include <xnnpack/unaligned.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x4c2__xop_ld64( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 2 * sizeof(int8_t)); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride); int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { a3 = a2; c3 = c2; } do { __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w); __m128i vacc1x0123 = vacc0x0123; __m128i vacc2x0123 = vacc0x0123; __m128i vacc3x0123 = vacc0x0123; w = (const void*) ((const int32_t*) w + 4); size_t k = kc; while (k >= 8 * sizeof(int8_t)) { const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0); const __m128i vxa0 = _mm_cvtepi8_epi16(va0); a0 += 8; const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1); const __m128i vxa1 = _mm_cvtepi8_epi16(va1); a1 += 8; const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2); const __m128i vxa2 = _mm_cvtepi8_epi16(va2); a2 += 8; const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3); const __m128i vxa3 = _mm_cvtepi8_epi16(va3); a3 += 8; const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb0 = _mm_cvtepi8_epi16(vb0); vacc0x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc0x0123); vacc1x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc1x0123); vacc2x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc2x0123); vacc3x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc3x0123); const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8)); const __m128i vxb1 = _mm_cvtepi8_epi16(vb1); vacc0x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc0x0123); vacc1x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc1x0123); vacc2x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc2x0123); vacc3x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc3x0123); const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16)); const __m128i vxb2 = _mm_cvtepi8_epi16(vb2); vacc0x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc0x0123); vacc1x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc1x0123); 
vacc2x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc2x0123); vacc3x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc3x0123); const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24)); const __m128i vxb3 = _mm_cvtepi8_epi16(vb3); vacc0x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc0x0123); vacc1x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc1x0123); vacc2x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc2x0123); vacc3x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc3x0123); w = (const void*) ((const int8_t*) w + 32); k -= 8 * sizeof(int8_t); } if (k != 0) { const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0); const __m128i vxa0 = _mm_cvtepi8_epi16(va0); a0 = (const int8_t*) ((uintptr_t) a0 + k); const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1); const __m128i vxa1 = _mm_cvtepi8_epi16(va1); a1 = (const int8_t*) ((uintptr_t) a1 + k); const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2); const __m128i vxa2 = _mm_cvtepi8_epi16(va2); a2 = (const int8_t*) ((uintptr_t) a2 + k); const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3); const __m128i vxa3 = _mm_cvtepi8_epi16(va3); a3 = (const int8_t*) ((uintptr_t) a3 + k); const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb0 = _mm_cvtepi8_epi16(vb0); w = (const void*) ((const int8_t*) w + 8); vacc0x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc0x0123); vacc1x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc1x0123); vacc2x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc2x0123); vacc3x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc3x0123); if (k > 2 * sizeof(int8_t)) { const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb1 = _mm_cvtepi8_epi16(vb1); w = (const void*) ((const int8_t*) w + 8); vacc0x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc0x0123); vacc1x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc1x0123); vacc2x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc2x0123); vacc3x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc3x0123); if (k > 4 * sizeof(int8_t)) { const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb2 = _mm_cvtepi8_epi16(vb2); w = (const void*) ((const int8_t*) w + 8); vacc0x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc0x0123); vacc1x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc1x0123); vacc2x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc2x0123); vacc3x0123 = _mm_maddd_epi16( _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc3x0123); } } } __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123); __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123); __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123); __m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123); const __m128 vscale0123 = _mm_loadu_ps((const float*) w); w = (const void*) ((const float*) w + 4); vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123); vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123); vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale0123); 
vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale0123); const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point); vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point); vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point); vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point); vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point); vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123); vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123); vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123); vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123); const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point); __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point); __m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point); __m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123); vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min)); if (nc >= 4) { unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout)); unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1)); unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2)); unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3)); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); a3 = (const int8_t*) ((uintptr_t) a3 - kc); nc -= 4; } else { if (nc & 2) { unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0)); c0 += 2; unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2)); c1 += 2; unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4)); c2 += 2; unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6)); c3 += 2; vout = _mm_srli_epi32(vout, 16); } if (nc & 1) { *c0 = (int8_t) _mm_extract_epi8(vout, 0); *c1 = (int8_t) _mm_extract_epi8(vout, 4); *c2 = (int8_t) _mm_extract_epi8(vout, 8); *c3 = (int8_t) _mm_extract_epi8(vout, 12); } nc = 0; } } while (nc != 0); }
10,636
38.838951
108
c
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x4c2s4-minmax-fp32-avx-ld128.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/MRx4c2s4-sse.c.in // Generator: tools/xngen // // Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <smmintrin.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> #include <xnnpack/unaligned.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x4c2s4__avx_ld128( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 8 * sizeof(int8_t)); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride); int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { a3 = a2; c3 = c2; } do { __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w); __m128i vacc1x0123 = vacc0x0123; __m128i vacc2x0123 = vacc0x0123; __m128i vacc3x0123 = vacc0x0123; w = (const void*) ((const int32_t*) w + 4); size_t k = kc; do { const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0); __m128i vxa0 = _mm_cvtepi8_epi16(va0); a0 += 8; const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1); __m128i vxa1 = _mm_cvtepi8_epi16(va1); a1 += 8; const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2); __m128i vxa2 = _mm_cvtepi8_epi16(va2); a2 += 8; const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3); __m128i vxa3 = _mm_cvtepi8_epi16(va3); a3 += 8; const __m128i vb01 = _mm_loadu_si128((const __m128i*) w); const __m128i vxb0 = _mm_cvtepi8_epi16(vb01); const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0)); vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0)); vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb0)); vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb0)); vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1)); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1)); vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1)); vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb1)); vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb1)); vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1)); const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16)); const __m128i vxb2 = _mm_cvtepi8_epi16(vb23); const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2)); 
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2)); vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb2)); vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb2)); vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1)); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb3)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb3)); w = (const void*) ((const int8_t*) w + 32); k -= 8 * sizeof(int8_t); } while (k != 0); __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123); __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123); __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123); __m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123); const __m128 vscale0123 = _mm_loadu_ps((const float*) w); w = (const void*) ((const float*) w + 4); vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123); vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123); vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale0123); vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale0123); const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point); vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point); vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point); vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point); vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point); vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123); vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123); vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123); vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123); const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point); __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point); __m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point); __m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123); vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min)); if (nc >= 4) { unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout)); unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1)); unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2)); unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3)); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); a3 = (const int8_t*) ((uintptr_t) a3 - kc); nc -= 4; } else { if (nc & 2) { unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0)); c0 += 2; unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2)); c1 += 2; unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4)); c2 += 2; unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6)); c3 += 2; vout = _mm_srli_epi32(vout, 16); } if (nc & 1) { *c0 = (int8_t) _mm_extract_epi8(vout, 0); *c1 = (int8_t) _mm_extract_epi8(vout, 4); *c2 = (int8_t) _mm_extract_epi8(vout, 8); *c3 = (int8_t) _mm_extract_epi8(vout, 12); } nc = 0; } } while (nc != 0); }
7,879
38.4
108
c
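Note: the fp32 requantization tail shared by the kernels in this family can be modeled in scalar C as below. This is a minimal sketch, not XNNPACK API; the function and parameter names are illustrative, and it assumes the default round-to-nearest-even mode that _mm_cvtps_epi32 uses.

#include <math.h>
#include <stdint.h>

/* Minimal scalar sketch of the requantization stage above (illustrative names,
   not part of XNNPACK): int32 accumulator -> fp32 per-channel scale -> upper
   clamp -> round-to-nearest-even -> zero-point add -> saturate -> lower clamp. */
static inline int8_t requantize(int32_t acc, float scale,
                                float output_max_less_zero_point,
                                int32_t output_zero_point, int8_t output_min) {
  float scaled = (float) acc * scale;              /* per-channel fp32 scale */
  if (scaled > output_max_less_zero_point) {
    scaled = output_max_less_zero_point;           /* clamp before rounding */
  }
  long out = lrintf(scaled) + output_zero_point;   /* nearest-even, like cvtps */
  if (out > 127) out = 127;                        /* packs_epi16 saturation */
  if (out < output_min) out = output_min;          /* final max_epi8 clamp */
  return (int8_t) out;
}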
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x4c2s4-minmax-fp32-avx-ld64.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/MRx4c2s4-sse.c.in // Generator: tools/xngen // // Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <smmintrin.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> #include <xnnpack/unaligned.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x4c2s4__avx_ld64( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 8 * sizeof(int8_t)); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride); int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { a3 = a2; c3 = c2; } do { __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w); __m128i vacc1x0123 = vacc0x0123; __m128i vacc2x0123 = vacc0x0123; __m128i vacc3x0123 = vacc0x0123; w = (const void*) ((const int32_t*) w + 4); size_t k = kc; do { const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0); __m128i vxa0 = _mm_cvtepi8_epi16(va0); a0 += 8; const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1); __m128i vxa1 = _mm_cvtepi8_epi16(va1); a1 += 8; const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2); __m128i vxa2 = _mm_cvtepi8_epi16(va2); a2 += 8; const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3); __m128i vxa3 = _mm_cvtepi8_epi16(va3); a3 += 8; const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb0 = _mm_cvtepi8_epi16(vb0); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0)); vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0)); vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb0)); vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb0)); vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1)); const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8)); const __m128i vxb1 = _mm_cvtepi8_epi16(vb1); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1)); vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1)); vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb1)); vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb1)); vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1)); const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16)); const __m128i vxb2 = _mm_cvtepi8_epi16(vb2); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2)); vxa0 = 
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2)); vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb2)); vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb2)); vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1)); const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24)); const __m128i vxb3 = _mm_cvtepi8_epi16(vb3); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb3)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb3)); w = (const void*) ((const int8_t*) w + 32); k -= 8 * sizeof(int8_t); } while (k != 0); __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123); __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123); __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123); __m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123); const __m128 vscale0123 = _mm_loadu_ps((const float*) w); w = (const void*) ((const float*) w + 4); vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123); vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123); vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale0123); vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale0123); const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point); vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point); vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point); vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point); vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point); vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123); vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123); vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123); vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123); const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point); __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point); __m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point); __m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123); vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min)); if (nc >= 4) { unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout)); unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1)); unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2)); unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3)); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); a3 = (const int8_t*) ((uintptr_t) a3 - kc); nc -= 4; } else { if (nc & 2) { unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0)); c0 += 2; unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2)); c1 += 2; unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4)); c2 += 2; unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6)); c3 += 2; vout = _mm_srli_epi32(vout, 16); } if (nc & 1) { *c0 = (int8_t) _mm_extract_epi8(vout, 0); *c1 = (int8_t) _mm_extract_epi8(vout, 4); *c2 = (int8_t) 
_mm_extract_epi8(vout, 8); *c3 = (int8_t) _mm_extract_epi8(vout, 12); } nc = 0; } } while (nc != 0); }
7,993
38.574257
108
c
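The ld128 and ld64 suffixes above differ only in how the packed int8 weights are fetched per loop iteration: ld128 pulls 16 bytes with one load and widens both halves, while ld64 issues separate 8-byte loads and widens each with _mm_cvtepi8_epi16. A hedged side-by-side sketch (the helper names are invented for illustration):

#include <stdint.h>
#include <smmintrin.h>

static inline void load_b01_ld128(const int8_t* w, __m128i* vxb0, __m128i* vxb1) {
  const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
  *vxb0 = _mm_cvtepi8_epi16(vb01);                           /* low 8 bytes */
  *vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);  /* high 8 bytes */
}

static inline void load_b01_ld64(const int8_t* w, __m128i* vxb0, __m128i* vxb1) {
  *vxb0 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) w));
  *vxb1 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (w + 8)));
}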
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x4c2s4-minmax-fp32-sse2-ld128.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/MRx4c2s4-sse.c.in // Generator: tools/xngen // // Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <emmintrin.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> #include <xnnpack/unaligned.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x4c2s4__sse2_ld128( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 8 * sizeof(int8_t)); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride); int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { a3 = a2; c3 = c2; } do { __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w); __m128i vacc1x0123 = vacc0x0123; __m128i vacc2x0123 = vacc0x0123; __m128i vacc3x0123 = vacc0x0123; w = (const void*) ((const int32_t*) w + 4); size_t k = kc; do { const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0); __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8); a0 += 8; const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1); __m128i vxa1 = _mm_srai_epi16(_mm_unpacklo_epi8(va1, va1), 8); a1 += 8; const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2); __m128i vxa2 = _mm_srai_epi16(_mm_unpacklo_epi8(va2, va2), 8); a2 += 8; const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3); __m128i vxa3 = _mm_srai_epi16(_mm_unpacklo_epi8(va3, va3), 8); a3 += 8; const __m128i vb01 = _mm_loadu_si128((const __m128i*) w); const __m128i vsb01 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb01); const __m128i vxb0 = _mm_unpacklo_epi8(vb01, vsb01); const __m128i vxb1 = _mm_unpackhi_epi8(vb01, vsb01); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0)); vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0)); vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb0)); vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb0)); vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1)); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1)); vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1)); vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb1)); vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb1)); vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1)); const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16)); const __m128i vsb23 = 
_mm_cmpgt_epi8(_mm_setzero_si128(), vb23); const __m128i vxb2 = _mm_unpacklo_epi8(vb23, vsb23); const __m128i vxb3 = _mm_unpackhi_epi8(vb23, vsb23); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2)); vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2)); vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb2)); vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb2)); vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1)); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb3)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb3)); w = (const void*) ((const int8_t*) w + 32); k -= 8 * sizeof(int8_t); } while (k != 0); __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123); __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123); __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123); __m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123); const __m128 vscale0123 = _mm_loadu_ps((const float*) w); w = (const void*) ((const float*) w + 4); vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123); vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123); vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale0123); vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale0123); const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point); vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point); vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point); vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point); vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point); vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123); vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123); vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123); vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123); const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point); __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point); __m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point); const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min); vacc01x0123 = _mm_max_epi16(vacc01x0123, voutput_min); vacc23x0123 = _mm_max_epi16(vacc23x0123, voutput_min); __m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123); if (nc >= 4) { unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout)); vout = _mm_shuffle_epi32(vout, _MM_SHUFFLE(0, 3, 2, 1)); unaligned_store_u32(c1, (uint32_t) _mm_cvtsi128_si32(vout)); vout = _mm_shuffle_epi32(vout, _MM_SHUFFLE(0, 3, 2, 1)); unaligned_store_u32(c2, (uint32_t) _mm_cvtsi128_si32(vout)); vout = _mm_shuffle_epi32(vout, _MM_SHUFFLE(0, 3, 2, 1)); unaligned_store_u32(c3, (uint32_t) _mm_cvtsi128_si32(vout)); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); a3 = (const int8_t*) ((uintptr_t) a3 - kc); nc -= 4; } else { if (nc & 2) { unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0)); c0 
+= 2; unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2)); c1 += 2; unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4)); c2 += 2; unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6)); c3 += 2; vout = _mm_srli_epi32(vout, 16); } if (nc & 1) { *c0 = (int8_t) _mm_cvtsi128_si32(vout); *c1 = (int8_t) _mm_extract_epi16(vout, 2); *c2 = (int8_t) _mm_extract_epi16(vout, 4); *c3 = (int8_t) _mm_extract_epi16(vout, 6); } nc = 0; } } while (nc != 0); }
8,395
39.560386
108
c
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x4c2s4-minmax-fp32-sse2-ld64.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/MRx4c2s4-sse.c.in // Generator: tools/xngen // // Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <emmintrin.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> #include <xnnpack/unaligned.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x4c2s4__sse2_ld64( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 8 * sizeof(int8_t)); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride); int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { a3 = a2; c3 = c2; } do { __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w); __m128i vacc1x0123 = vacc0x0123; __m128i vacc2x0123 = vacc0x0123; __m128i vacc3x0123 = vacc0x0123; w = (const void*) ((const int32_t*) w + 4); size_t k = kc; do { const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0); __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8); a0 += 8; const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1); __m128i vxa1 = _mm_srai_epi16(_mm_unpacklo_epi8(va1, va1), 8); a1 += 8; const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2); __m128i vxa2 = _mm_srai_epi16(_mm_unpacklo_epi8(va2, va2), 8); a2 += 8; const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3); __m128i vxa3 = _mm_srai_epi16(_mm_unpacklo_epi8(va3, va3), 8); a3 += 8; const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb0 = _mm_srai_epi16(_mm_unpacklo_epi8(vb0, vb0), 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0)); vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0)); vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb0)); vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb0)); vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1)); const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8)); const __m128i vxb1 = _mm_srai_epi16(_mm_unpacklo_epi8(vb1, vb1), 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1)); vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1)); vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb1)); vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb1)); vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1)); const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w 
+ 16)); const __m128i vxb2 = _mm_srai_epi16(_mm_unpacklo_epi8(vb2, vb2), 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2)); vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2)); vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb2)); vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb2)); vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1)); const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24)); const __m128i vxb3 = _mm_srai_epi16(_mm_unpacklo_epi8(vb3, vb3), 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb3)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb3)); w = (const void*) ((const int8_t*) w + 32); k -= 8 * sizeof(int8_t); } while (k != 0); __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123); __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123); __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123); __m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123); const __m128 vscale0123 = _mm_loadu_ps((const float*) w); w = (const void*) ((const float*) w + 4); vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123); vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123); vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale0123); vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale0123); const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point); vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point); vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point); vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point); vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point); vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123); vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123); vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123); vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123); const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point); __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point); __m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point); const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min); vacc01x0123 = _mm_max_epi16(vacc01x0123, voutput_min); vacc23x0123 = _mm_max_epi16(vacc23x0123, voutput_min); __m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123); if (nc >= 4) { unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout)); vout = _mm_shuffle_epi32(vout, _MM_SHUFFLE(0, 3, 2, 1)); unaligned_store_u32(c1, (uint32_t) _mm_cvtsi128_si32(vout)); vout = _mm_shuffle_epi32(vout, _MM_SHUFFLE(0, 3, 2, 1)); unaligned_store_u32(c2, (uint32_t) _mm_cvtsi128_si32(vout)); vout = _mm_shuffle_epi32(vout, _MM_SHUFFLE(0, 3, 2, 1)); unaligned_store_u32(c3, (uint32_t) _mm_cvtsi128_si32(vout)); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); a3 = (const int8_t*) ((uintptr_t) a3 - kc); nc -= 4; } else { if 
(nc & 2) { unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0)); c0 += 2; unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2)); c1 += 2; unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4)); c2 += 2; unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6)); c3 += 2; vout = _mm_srli_epi32(vout, 16); } if (nc & 1) { *c0 = (int8_t) _mm_cvtsi128_si32(vout); *c1 = (int8_t) _mm_extract_epi16(vout, 2); *c2 = (int8_t) _mm_extract_epi16(vout, 4); *c3 = (int8_t) _mm_extract_epi16(vout, 6); } nc = 0; } } while (nc != 0); }
8,485
39.995169
108
c
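The SSE2 variants above cannot use _mm_cvtepi8_epi16 (an SSE4.1 instruction), so they sign-extend int8 lanes with an unpack-and-arithmetic-shift idiom: interleaving a vector with itself duplicates each byte into both halves of a 16-bit lane, and an arithmetic shift right by 8 then propagates the sign bits. A small equivalence sketch:

#include <emmintrin.h>

/* SSE2 replacement for the low-half of _mm_cvtepi8_epi16(v):
   lane i becomes ((v[i] << 8) | v[i]) >> 8 arithmetically == sign-extended v[i]. */
static inline __m128i sse2_cvtepi8_epi16_lo(__m128i v) {
  return _mm_srai_epi16(_mm_unpacklo_epi8(v, v), 8);
}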
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x4c2s4-minmax-fp32-sse41-ld128.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/MRx4c2s4-sse.c.in // Generator: tools/xngen // // Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <smmintrin.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> #include <xnnpack/unaligned.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x4c2s4__sse41_ld128( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 8 * sizeof(int8_t)); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride); int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { a3 = a2; c3 = c2; } do { __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w); __m128i vacc1x0123 = vacc0x0123; __m128i vacc2x0123 = vacc0x0123; __m128i vacc3x0123 = vacc0x0123; w = (const void*) ((const int32_t*) w + 4); size_t k = kc; do { const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0); __m128i vxa0 = _mm_cvtepi8_epi16(va0); a0 += 8; const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1); __m128i vxa1 = _mm_cvtepi8_epi16(va1); a1 += 8; const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2); __m128i vxa2 = _mm_cvtepi8_epi16(va2); a2 += 8; const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3); __m128i vxa3 = _mm_cvtepi8_epi16(va3); a3 += 8; const __m128i vb01 = _mm_loadu_si128((const __m128i*) w); const __m128i vxb0 = _mm_cvtepi8_epi16(vb01); const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0)); vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0)); vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb0)); vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb0)); vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1)); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1)); vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1)); vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb1)); vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb1)); vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1)); const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16)); const __m128i vxb2 = _mm_cvtepi8_epi16(vb23); const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, 
vxb2)); vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2)); vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb2)); vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb2)); vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1)); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb3)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb3)); w = (const void*) ((const int8_t*) w + 32); k -= 8 * sizeof(int8_t); } while (k != 0); __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123); __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123); __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123); __m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123); const __m128 vscale0123 = _mm_loadu_ps((const float*) w); w = (const void*) ((const float*) w + 4); vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123); vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123); vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale0123); vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale0123); const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point); vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point); vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point); vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point); vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point); vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123); vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123); vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123); vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123); const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point); __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point); __m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point); __m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123); vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min)); if (nc >= 4) { unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout)); unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1)); unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2)); unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3)); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); a3 = (const int8_t*) ((uintptr_t) a3 - kc); nc -= 4; } else { if (nc & 2) { unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0)); c0 += 2; unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2)); c1 += 2; unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4)); c2 += 2; unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6)); c3 += 2; vout = _mm_srli_epi32(vout, 16); } if (nc & 1) { *c0 = (int8_t) _mm_extract_epi8(vout, 0); *c1 = (int8_t) _mm_extract_epi8(vout, 4); *c2 = (int8_t) _mm_extract_epi8(vout, 8); *c3 = (int8_t) _mm_extract_epi8(vout, 12); } nc = 0; } } while (nc != 0); }
7,881
38.41
108
c
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x4c2s4-minmax-fp32-sse41-ld64.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/MRx4c2s4-sse.c.in // Generator: tools/xngen // // Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <smmintrin.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> #include <xnnpack/unaligned.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x4c2s4__sse41_ld64( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 8 * sizeof(int8_t)); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride); int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { a3 = a2; c3 = c2; } do { __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w); __m128i vacc1x0123 = vacc0x0123; __m128i vacc2x0123 = vacc0x0123; __m128i vacc3x0123 = vacc0x0123; w = (const void*) ((const int32_t*) w + 4); size_t k = kc; do { const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0); __m128i vxa0 = _mm_cvtepi8_epi16(va0); a0 += 8; const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1); __m128i vxa1 = _mm_cvtepi8_epi16(va1); a1 += 8; const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2); __m128i vxa2 = _mm_cvtepi8_epi16(va2); a2 += 8; const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3); __m128i vxa3 = _mm_cvtepi8_epi16(va3); a3 += 8; const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb0 = _mm_cvtepi8_epi16(vb0); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0)); vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0)); vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb0)); vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb0)); vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1)); const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8)); const __m128i vxb1 = _mm_cvtepi8_epi16(vb1); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1)); vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1)); vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb1)); vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb1)); vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1)); const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16)); const __m128i vxb2 = _mm_cvtepi8_epi16(vb2); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2)); vxa0 = 
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2)); vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb2)); vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb2)); vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1)); const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24)); const __m128i vxb3 = _mm_cvtepi8_epi16(vb3); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb3)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb3)); w = (const void*) ((const int8_t*) w + 32); k -= 8 * sizeof(int8_t); } while (k != 0); __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123); __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123); __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123); __m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123); const __m128 vscale0123 = _mm_loadu_ps((const float*) w); w = (const void*) ((const float*) w + 4); vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123); vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123); vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale0123); vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale0123); const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point); vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point); vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point); vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point); vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point); vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123); vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123); vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123); vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123); const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point); __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point); __m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point); __m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123); vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min)); if (nc >= 4) { unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout)); unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1)); unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2)); unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3)); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); a3 = (const int8_t*) ((uintptr_t) a3 - kc); nc -= 4; } else { if (nc & 2) { unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0)); c0 += 2; unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2)); c1 += 2; unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4)); c2 += 2; unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6)); c3 += 2; vout = _mm_srli_epi32(vout, 16); } if (nc & 1) { *c0 = (int8_t) _mm_extract_epi8(vout, 0); *c1 = (int8_t) _mm_extract_epi8(vout, 4); *c2 = (int8_t) 
_mm_extract_epi8(vout, 8); *c3 = (int8_t) _mm_extract_epi8(vout, 12); } nc = 0; } } while (nc != 0); }
7,995
38.584158
108
c
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x4c2s4-minmax-fp32-xop-ld64.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/MRx4c2s4-sse.c.in // Generator: tools/xngen // // Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #ifdef _MSC_VER #include <intrin.h> #else #include <x86intrin.h> #endif #include <xnnpack/gemm.h> #include <xnnpack/math.h> #include <xnnpack/unaligned.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x4c2s4__xop_ld64( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 8 * sizeof(int8_t)); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride); int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { a3 = a2; c3 = c2; } do { __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w); __m128i vacc1x0123 = vacc0x0123; __m128i vacc2x0123 = vacc0x0123; __m128i vacc3x0123 = vacc0x0123; w = (const void*) ((const int32_t*) w + 4); size_t k = kc; do { const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0); __m128i vxa0 = _mm_cvtepi8_epi16(va0); a0 += 8; const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1); __m128i vxa1 = _mm_cvtepi8_epi16(va1); a1 += 8; const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2); __m128i vxa2 = _mm_cvtepi8_epi16(va2); a2 += 8; const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3); __m128i vxa3 = _mm_cvtepi8_epi16(va3); a3 += 8; const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb0 = _mm_cvtepi8_epi16(vb0); vacc0x0123 = _mm_maddd_epi16(vxa0, vxb0, vacc0x0123); vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1)); vacc1x0123 = _mm_maddd_epi16(vxa1, vxb0, vacc1x0123); vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1)); vacc2x0123 = _mm_maddd_epi16(vxa2, vxb0, vacc2x0123); vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1)); vacc3x0123 = _mm_maddd_epi16(vxa3, vxb0, vacc3x0123); vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1)); const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8)); const __m128i vxb1 = _mm_cvtepi8_epi16(vb1); vacc0x0123 = _mm_maddd_epi16(vxa0, vxb1, vacc0x0123); vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1)); vacc1x0123 = _mm_maddd_epi16(vxa1, vxb1, vacc1x0123); vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1)); vacc2x0123 = _mm_maddd_epi16(vxa2, vxb1, vacc2x0123); vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1)); vacc3x0123 = _mm_maddd_epi16(vxa3, vxb1, vacc3x0123); vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1)); const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16)); const __m128i vxb2 = _mm_cvtepi8_epi16(vb2); vacc0x0123 = _mm_maddd_epi16(vxa0, vxb2, vacc0x0123); vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1)); vacc1x0123 = _mm_maddd_epi16(vxa1, vxb2, 
vacc1x0123); vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1)); vacc2x0123 = _mm_maddd_epi16(vxa2, vxb2, vacc2x0123); vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1)); vacc3x0123 = _mm_maddd_epi16(vxa3, vxb2, vacc3x0123); vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1)); const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24)); const __m128i vxb3 = _mm_cvtepi8_epi16(vb3); vacc0x0123 = _mm_maddd_epi16(vxa0, vxb3, vacc0x0123); vacc1x0123 = _mm_maddd_epi16(vxa1, vxb3, vacc1x0123); vacc2x0123 = _mm_maddd_epi16(vxa2, vxb3, vacc2x0123); vacc3x0123 = _mm_maddd_epi16(vxa3, vxb3, vacc3x0123); w = (const void*) ((const int8_t*) w + 32); k -= 8 * sizeof(int8_t); } while (k != 0); __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123); __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123); __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123); __m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123); const __m128 vscale0123 = _mm_loadu_ps((const float*) w); w = (const void*) ((const float*) w + 4); vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123); vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123); vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale0123); vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale0123); const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point); vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point); vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point); vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point); vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point); vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123); vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123); vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123); vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123); const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point); __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point); __m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point); __m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123); vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min)); if (nc >= 4) { unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout)); unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1)); unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2)); unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3)); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); a3 = (const int8_t*) ((uintptr_t) a3 - kc); nc -= 4; } else { if (nc & 2) { unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0)); c0 += 2; unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2)); c1 += 2; unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4)); c2 += 2; unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6)); c3 += 2; vout = _mm_srli_epi32(vout, 16); } if (nc & 1) { *c0 = (int8_t) _mm_extract_epi8(vout, 0); *c1 = (int8_t) _mm_extract_epi8(vout, 4); *c2 = (int8_t) _mm_extract_epi8(vout, 8); *c3 = (int8_t) _mm_extract_epi8(vout, 12); } nc = 0; } } while (nc != 0); }
7,822
36.975728
108
c
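The XOP variant above differs from the SSE4.1 files only in the inner multiply-accumulate: _mm_maddd_epi16(a, b, acc) computes _mm_add_epi32(acc, _mm_madd_epi16(a, b)) in a single instruction, saving one add per step of the inner loop. A sketch of the equivalence:

#if defined(__XOP__)
#include <x86intrin.h>

static inline __m128i madd_acc_sse(__m128i vxa, __m128i vxb, __m128i vacc) {
  return _mm_add_epi32(vacc, _mm_madd_epi16(vxa, vxb));  /* two instructions */
}

static inline __m128i madd_acc_xop(__m128i vxa, __m128i vxb, __m128i vacc) {
  return _mm_maddd_epi16(vxa, vxb, vacc);                /* one instruction */
}
#endif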
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x4c2s4-minmax-fp32-wasmsimd-dot16x2-ld128.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/MRx4c2s4-wasmsimd-dot16x2.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x4c2s4__wasmsimd_dot16x2_ld128( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride); int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { a3 = a2; c3 = c2; } kc = round_up_po2(kc, 8 * sizeof(int8_t)); do { v128_t vacc0x0123 = wasm_v128_load(w); v128_t vacc1x0123 = vacc0x0123; v128_t vacc2x0123 = vacc0x0123; v128_t vacc3x0123 = vacc0x0123; w = (const void*) ((const int32_t*) w + 4); size_t k = kc; do { v128_t vxa0 = wasm_i16x8_load8x8((const v128_t*) a0); a0 += 8; v128_t vxa1 = wasm_i16x8_load8x8((const v128_t*) a1); a1 += 8; v128_t vxa2 = wasm_i16x8_load8x8((const v128_t*) a2); a2 += 8; v128_t vxa3 = wasm_i16x8_load8x8((const v128_t*) a3); a3 += 8; const v128_t vb01 = wasm_v128_load(w); const v128_t vxb0 = wasm_i16x8_extend_low_i8x16(vb01); const v128_t vxb1 = wasm_i16x8_extend_high_i8x16(vb01); vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb0)); vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4); vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb0)); vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4); vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb0)); vxa2 = wasm_v32x4_shuffle(vxa2, vxa2, 1, 2, 3, 4); vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(vxa3, vxb0)); vxa3 = wasm_v32x4_shuffle(vxa3, vxa3, 1, 2, 3, 4); vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb1)); vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4); vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb1)); vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4); vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb1)); vxa2 = wasm_v32x4_shuffle(vxa2, vxa2, 1, 2, 3, 4); vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(vxa3, vxb1)); vxa3 = wasm_v32x4_shuffle(vxa3, vxa3, 1, 2, 3, 4); const v128_t vb23 = wasm_v128_load((const int8_t*) w + 16); const v128_t vxb2 = wasm_i16x8_extend_low_i8x16(vb23); const v128_t vxb3 = wasm_i16x8_extend_high_i8x16(vb23); vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb2)); vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4); vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb2)); vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4); vacc2x0123 = wasm_i32x4_add(vacc2x0123, 
wasm_i32x4_dot_i16x8(vxa2, vxb2)); vxa2 = wasm_v32x4_shuffle(vxa2, vxa2, 1, 2, 3, 4); vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(vxa3, vxb2)); vxa3 = wasm_v32x4_shuffle(vxa3, vxa3, 1, 2, 3, 4); vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb3)); vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb3)); vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb3)); vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(vxa3, vxb3)); w = (const int8_t*) w + 32; k -= 8 * sizeof(int8_t); } while (k != 0); vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123); vacc1x0123 = wasm_f32x4_convert_i32x4(vacc1x0123); vacc2x0123 = wasm_f32x4_convert_i32x4(vacc2x0123); vacc3x0123 = wasm_f32x4_convert_i32x4(vacc3x0123); const v128_t vscale0123 = wasm_v128_load(w); w = (const float*) w + 4; vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale0123); vacc1x0123 = wasm_f32x4_mul(vacc1x0123, vscale0123); vacc2x0123 = wasm_f32x4_mul(vacc2x0123, vscale0123); vacc3x0123 = wasm_f32x4_mul(vacc3x0123, vscale0123); const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias); vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias); vacc1x0123 = wasm_f32x4_add(vacc1x0123, vmagic_bias); vacc2x0123 = wasm_f32x4_add(vacc2x0123, vmagic_bias); vacc3x0123 = wasm_f32x4_add(vacc3x0123, vmagic_bias); const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min); vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min); vacc1x0123 = wasm_i32x4_max(vacc1x0123, vmagic_min); vacc2x0123 = wasm_i32x4_max(vacc2x0123, vmagic_min); vacc3x0123 = wasm_i32x4_max(vacc3x0123, vmagic_min); const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point); vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point); vacc1x0123 = wasm_i32x4_sub(vacc1x0123, vmagic_bias_less_output_zero_point); vacc2x0123 = wasm_i32x4_sub(vacc2x0123, vmagic_bias_less_output_zero_point); vacc3x0123 = wasm_i32x4_sub(vacc3x0123, vmagic_bias_less_output_zero_point); v128_t vacc01x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc1x0123); v128_t vacc23x0123 = wasm_i16x8_narrow_i32x4(vacc2x0123, vacc3x0123); v128_t vout = wasm_i8x16_narrow_i16x8(vacc01x0123, vacc23x0123); const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max); vout = wasm_i8x16_min(vout, voutput_max); if (nc >= 4) { wasm_v128_store32_lane(c0, vout, 0); wasm_v128_store32_lane(c1, vout, 1); wasm_v128_store32_lane(c2, vout, 2); wasm_v128_store32_lane(c3, vout, 3); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); a3 = (const int8_t*) ((uintptr_t) a3 - kc); nc -= 4; } else { if (nc & 2) { wasm_v128_store16_lane(c0, vout, 0); c0 += 2; wasm_v128_store16_lane(c1, vout, 2); c1 += 2; wasm_v128_store16_lane(c2, vout, 4); c2 += 2; wasm_v128_store16_lane(c3, vout, 6); c3 += 2; vout = wasm_u32x4_shr(vout, 16); } if (nc & 1) { wasm_v128_store8_lane(c0, vout, 0); wasm_v128_store8_lane(c1, vout, 4); wasm_v128_store8_lane(c2, vout, 8); wasm_v128_store8_lane(c3, vout, 12); } nc = 0; } } while (nc != 0); }
7,724
37.242574
134
c
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x4c2s4-minmax-fp32-wasmsimd-dot16x2-ld64.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/MRx4c2s4-wasmsimd-dot16x2.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x4c2s4__wasmsimd_dot16x2_ld64( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride); int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { a3 = a2; c3 = c2; } kc = round_up_po2(kc, 8 * sizeof(int8_t)); do { v128_t vacc0x0123 = wasm_v128_load(w); v128_t vacc1x0123 = vacc0x0123; v128_t vacc2x0123 = vacc0x0123; v128_t vacc3x0123 = vacc0x0123; w = (const void*) ((const int32_t*) w + 4); size_t k = kc; do { v128_t vxa0 = wasm_i16x8_load8x8((const v128_t*) a0); a0 += 8; v128_t vxa1 = wasm_i16x8_load8x8((const v128_t*) a1); a1 += 8; v128_t vxa2 = wasm_i16x8_load8x8((const v128_t*) a2); a2 += 8; v128_t vxa3 = wasm_i16x8_load8x8((const v128_t*) a3); a3 += 8; const v128_t vxb0 = wasm_i16x8_load8x8(w); vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb0)); vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4); vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb0)); vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4); vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb0)); vxa2 = wasm_v32x4_shuffle(vxa2, vxa2, 1, 2, 3, 4); vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(vxa3, vxb0)); vxa3 = wasm_v32x4_shuffle(vxa3, vxa3, 1, 2, 3, 4); const v128_t vxb1 = wasm_i16x8_load8x8((const int8_t*) w + 8); vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb1)); vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4); vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb1)); vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4); vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb1)); vxa2 = wasm_v32x4_shuffle(vxa2, vxa2, 1, 2, 3, 4); vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(vxa3, vxb1)); vxa3 = wasm_v32x4_shuffle(vxa3, vxa3, 1, 2, 3, 4); const v128_t vxb2 = wasm_i16x8_load8x8((const int8_t*) w + 16); vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb2)); vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4); vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb2)); vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4); vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb2)); vxa2 = wasm_v32x4_shuffle(vxa2, vxa2, 1, 2, 3, 4); vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(vxa3, vxb2)); vxa3 = 
wasm_v32x4_shuffle(vxa3, vxa3, 1, 2, 3, 4); const v128_t vxb3 = wasm_i16x8_load8x8((const int8_t*) w + 24); vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb3)); vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb3)); vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb3)); vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(vxa3, vxb3)); w = (const int8_t*) w + 32; k -= 8 * sizeof(int8_t); } while (k != 0); vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123); vacc1x0123 = wasm_f32x4_convert_i32x4(vacc1x0123); vacc2x0123 = wasm_f32x4_convert_i32x4(vacc2x0123); vacc3x0123 = wasm_f32x4_convert_i32x4(vacc3x0123); const v128_t vscale0123 = wasm_v128_load(w); w = (const float*) w + 4; vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale0123); vacc1x0123 = wasm_f32x4_mul(vacc1x0123, vscale0123); vacc2x0123 = wasm_f32x4_mul(vacc2x0123, vscale0123); vacc3x0123 = wasm_f32x4_mul(vacc3x0123, vscale0123); const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias); vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias); vacc1x0123 = wasm_f32x4_add(vacc1x0123, vmagic_bias); vacc2x0123 = wasm_f32x4_add(vacc2x0123, vmagic_bias); vacc3x0123 = wasm_f32x4_add(vacc3x0123, vmagic_bias); const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min); vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min); vacc1x0123 = wasm_i32x4_max(vacc1x0123, vmagic_min); vacc2x0123 = wasm_i32x4_max(vacc2x0123, vmagic_min); vacc3x0123 = wasm_i32x4_max(vacc3x0123, vmagic_min); const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point); vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point); vacc1x0123 = wasm_i32x4_sub(vacc1x0123, vmagic_bias_less_output_zero_point); vacc2x0123 = wasm_i32x4_sub(vacc2x0123, vmagic_bias_less_output_zero_point); vacc3x0123 = wasm_i32x4_sub(vacc3x0123, vmagic_bias_less_output_zero_point); v128_t vacc01x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc1x0123); v128_t vacc23x0123 = wasm_i16x8_narrow_i32x4(vacc2x0123, vacc3x0123); v128_t vout = wasm_i8x16_narrow_i16x8(vacc01x0123, vacc23x0123); const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max); vout = wasm_i8x16_min(vout, voutput_max); if (nc >= 4) { wasm_v128_store32_lane(c0, vout, 0); wasm_v128_store32_lane(c1, vout, 1); wasm_v128_store32_lane(c2, vout, 2); wasm_v128_store32_lane(c3, vout, 3); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); a3 = (const int8_t*) ((uintptr_t) a3 - kc); nc -= 4; } else { if (nc & 2) { wasm_v128_store16_lane(c0, vout, 0); c0 += 2; wasm_v128_store16_lane(c1, vout, 2); c1 += 2; wasm_v128_store16_lane(c2, vout, 4); c2 += 2; wasm_v128_store16_lane(c3, vout, 6); c3 += 2; vout = wasm_u32x4_shr(vout, 16); } if (nc & 1) { wasm_v128_store8_lane(c0, vout, 0); wasm_v128_store8_lane(c1, vout, 4); wasm_v128_store8_lane(c2, vout, 8); wasm_v128_store8_lane(c3, vout, 12); } nc = 0; } } while (nc != 0); }
7,624
37.125
134
c
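The wasmsimd kernel above requantizes with the fp32 magic-bias trick: the float accumulator is biased so that round-to-nearest happens as a side effect of the float addition, and the integer result is then recovered by reinterpreting the bits. A minimal scalar sketch of that idea follows; the constant 12582912.0f and the helper name are illustrative assumptions, and the real kernel keeps everything in v128_t lanes, loading its constants from params->fp32_wasmsimd and clamping via magic_min/output_max rather than the scalar clamps shown here.

#include <stdint.h>
#include <string.h>

/* Scalar sketch of the fp32 magic-bias requantization (illustrative only). */
static int8_t requantize_magic_bias(int32_t acc, float scale, int8_t output_zero_point) {
  const float vmagic_bias = 12582912.0f;  /* 0x1.8p+23f: smallest float whose ulp is 1 */
  /* Adding the bias pins the exponent, so the integer value lands in the low
     mantissa bits, rounded to nearest-even by the float addition itself. */
  const float vscaled = (float) acc * scale + vmagic_bias;
  int32_t vbits;
  memcpy(&vbits, &vscaled, sizeof(vbits));  /* reinterpret float bits as int32 */
  /* 0x4B400000 is the bit pattern of vmagic_bias; subtracting it less the zero
     point yields the rounded integer shifted to the output zero point. */
  int32_t vout = vbits - (INT32_C(0x4B400000) - (int32_t) output_zero_point);
  if (vout < INT8_MIN) vout = INT8_MIN;
  if (vout > INT8_MAX) vout = INT8_MAX;
  return (int8_t) vout;
}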
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x4c2s4-minmax-fp32-xop-ld128.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/MRx4c2s4-sse.c.in // Generator: tools/xngen // // Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #ifdef _MSC_VER #include <intrin.h> #else #include <x86intrin.h> #endif #include <xnnpack/gemm.h> #include <xnnpack/math.h> #include <xnnpack/unaligned.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x4c2s4__xop_ld128( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 8 * sizeof(int8_t)); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride); int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { a3 = a2; c3 = c2; } do { __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w); __m128i vacc1x0123 = vacc0x0123; __m128i vacc2x0123 = vacc0x0123; __m128i vacc3x0123 = vacc0x0123; w = (const void*) ((const int32_t*) w + 4); size_t k = kc; do { const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0); __m128i vxa0 = _mm_cvtepi8_epi16(va0); a0 += 8; const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1); __m128i vxa1 = _mm_cvtepi8_epi16(va1); a1 += 8; const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2); __m128i vxa2 = _mm_cvtepi8_epi16(va2); a2 += 8; const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3); __m128i vxa3 = _mm_cvtepi8_epi16(va3); a3 += 8; const __m128i vb01 = _mm_loadu_si128((const __m128i*) w); const __m128i vxb0 = _mm_cvtepi8_epi16(vb01); const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8); vacc0x0123 = _mm_maddd_epi16(vxa0, vxb0, vacc0x0123); vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1)); vacc1x0123 = _mm_maddd_epi16(vxa1, vxb0, vacc1x0123); vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1)); vacc2x0123 = _mm_maddd_epi16(vxa2, vxb0, vacc2x0123); vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1)); vacc3x0123 = _mm_maddd_epi16(vxa3, vxb0, vacc3x0123); vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1)); vacc0x0123 = _mm_maddd_epi16(vxa0, vxb1, vacc0x0123); vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1)); vacc1x0123 = _mm_maddd_epi16(vxa1, vxb1, vacc1x0123); vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1)); vacc2x0123 = _mm_maddd_epi16(vxa2, vxb1, vacc2x0123); vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1)); vacc3x0123 = _mm_maddd_epi16(vxa3, vxb1, vacc3x0123); vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1)); const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16)); const __m128i vxb2 = _mm_cvtepi8_epi16(vb23); const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8); vacc0x0123 = _mm_maddd_epi16(vxa0, vxb2, vacc0x0123); vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1)); vacc1x0123 = 
_mm_maddd_epi16(vxa1, vxb2, vacc1x0123); vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1)); vacc2x0123 = _mm_maddd_epi16(vxa2, vxb2, vacc2x0123); vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1)); vacc3x0123 = _mm_maddd_epi16(vxa3, vxb2, vacc3x0123); vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1)); vacc0x0123 = _mm_maddd_epi16(vxa0, vxb3, vacc0x0123); vacc1x0123 = _mm_maddd_epi16(vxa1, vxb3, vacc1x0123); vacc2x0123 = _mm_maddd_epi16(vxa2, vxb3, vacc2x0123); vacc3x0123 = _mm_maddd_epi16(vxa3, vxb3, vacc3x0123); w = (const void*) ((const int8_t*) w + 32); k -= 8 * sizeof(int8_t); } while (k != 0); __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123); __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123); __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123); __m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123); const __m128 vscale0123 = _mm_loadu_ps((const float*) w); w = (const void*) ((const float*) w + 4); vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123); vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123); vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale0123); vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale0123); const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point); vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point); vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point); vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point); vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point); vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123); vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123); vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123); vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123); const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point); __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point); __m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point); __m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123); vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min)); if (nc >= 4) { unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout)); unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1)); unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2)); unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3)); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); a3 = (const int8_t*) ((uintptr_t) a3 - kc); nc -= 4; } else { if (nc & 2) { unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0)); c0 += 2; unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2)); c1 += 2; unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4)); c2 += 2; unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6)); c3 += 2; vout = _mm_srli_epi32(vout, 16); } if (nc & 1) { *c0 = (int8_t) _mm_extract_epi8(vout, 0); *c1 = (int8_t) _mm_extract_epi8(vout, 4); *c2 = (int8_t) _mm_extract_epi8(vout, 8); *c3 = (int8_t) _mm_extract_epi8(vout, 12); } nc = 0; } } while (nc != 0); }
7,708
36.789216
108
c
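Both 4x4c2s4 variants above (wasmsimd dot16x2 and XOP _mm_maddd_epi16) compute the same 4x4 output tile; the c2s4 layout just feeds two k-elements per 32-bit lane and rotates the activation register between the four weight groups. As a plain-C reference for what one tile accumulates before requantization — the names and the unpacked layouts here are assumptions for illustration, since the real kernels read bias, weights, and scales from the packed w stream:

#include <stddef.h>
#include <stdint.h>

/* Reference tile: acc[m][n] = bias[n] + sum_k a[m][k] * b[k][n]. */
void gemm_4x4_tile_ref(size_t kc,
                       const int8_t* a, size_t a_stride,  /* 4 rows of kc activations */
                       const int8_t* b,                   /* kc x 4 weights, row-major */
                       const int32_t* bias,               /* 4 per-channel biases */
                       int32_t acc[4][4]) {
  for (size_t m = 0; m < 4; m++) {
    for (size_t n = 0; n < 4; n++) {
      int32_t sum = bias[n];
      for (size_t k = 0; k < kc; k++) {
        sum += (int32_t) a[m * a_stride + k] * (int32_t) b[k * 4 + n];
      }
      acc[m][n] = sum;  /* the kernel then scales, rounds, and saturates to int8 */
    }
  }
}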
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x8-minmax-fp32-neon-mlal-lane-prfm.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/neon-mlal-lane.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/common.h> #include <xnnpack/gemm.h> #include <xnnpack/prefetch.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x8__neon_mlal_lane_prfm( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride); int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { a3 = a2; c3 = c2; } do { int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4); int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4); int32x4_t vacc1x0123 = vacc0x0123; int32x4_t vacc1x4567 = vacc0x4567; int32x4_t vacc2x0123 = vacc0x0123; int32x4_t vacc2x4567 = vacc0x4567; int32x4_t vacc3x0123 = vacc0x0123; int32x4_t vacc3x4567 = vacc0x4567; size_t k = kc; while (k >= 8 * sizeof(int8_t)) { const int8x8_t va0 = vld1_s8(a0); a0 += 8; const int16x8_t vxa0 = vmovl_s8(va0); const int8x8_t va1 = vld1_s8(a1); a1 += 8; const int16x8_t vxa1 = vmovl_s8(va1); const int8x8_t va2 = vld1_s8(a2); a2 += 8; const int16x8_t vxa2 = vmovl_s8(va2); const int8x8_t va3 = vld1_s8(a3); a3 += 8; const int16x8_t vxa3 = vmovl_s8(va3); const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa1), 0); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa1), 0); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa2), 0); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa2), 0); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa3), 0); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa3), 0); const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa1), 1); vacc1x4567 = vmlal_lane_s16(vacc1x4567, 
vget_high_s16(vxb01234567c1), vget_low_s16(vxa1), 1); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa2), 1); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa2), 1); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa3), 1); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa3), 1); const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa1), 2); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa1), 2); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa2), 2); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa2), 2); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa3), 2); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa3), 2); const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa1), 3); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa1), 3); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa2), 3); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa2), 3); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa3), 3); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa3), 3); xnn_prefetch_to_l1((const int8_t*) w + 448); const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa1), 0); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa1), 0); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa2), 0); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa2), 0); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa3), 0); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa3), 0); const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c5), 
vget_high_s16(vxa1), 1); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa1), 1); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa2), 1); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa2), 1); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa3), 1); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa3), 1); const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa1), 2); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa1), 2); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa2), 2); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa2), 2); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa3), 2); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa3), 2); const int8x8_t vb01234567c7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c7 = vmovl_s8(vb01234567c7); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa0), 3); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa0), 3); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa1), 3); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa1), 3); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa2), 3); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa2), 3); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa3), 3); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa3), 3); k -= 8 * sizeof(int8_t); } if XNN_UNLIKELY(k != 0) { const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k); const int16x8_t vxa0 = vmovl_s8(va0); const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k); const int16x8_t vxa1 = vmovl_s8(va1); const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k); const int16x8_t vxa2 = vmovl_s8(va2); const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k); const int16x8_t vxa3 = vmovl_s8(va3); const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa1), 0); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa1), 0); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa2), 0); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa2), 0); vacc3x0123 = vmlal_lane_s16(vacc3x0123, 
vget_low_s16(vxb01234567c0), vget_low_s16(vxa3), 0); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa3), 0); if (k >= 2 * sizeof(int8_t)) { const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa1), 1); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa1), 1); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa2), 1); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa2), 1); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa3), 1); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa3), 1); if (k > 2 * sizeof(int8_t)) { const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa1), 2); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa1), 2); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa2), 2); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa2), 2); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa3), 2); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa3), 2); if (k >= 4 * sizeof(int8_t)) { const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa1), 3); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa1), 3); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa2), 3); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa2), 3); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa3), 3); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa3), 3); if (k > 4 * sizeof(int8_t)) { const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa1), 0); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa1), 0); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa2), 0); 
vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa2), 0); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa3), 0); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa3), 0); if (k >= 6 * sizeof(int8_t)) { const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa1), 1); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa1), 1); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa2), 1); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa2), 1); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa3), 1); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa3), 1); if (k > 6 * sizeof(int8_t)) { const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa1), 2); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa1), 2); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa2), 2); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa2), 2); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa3), 2); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa3), 2); } } } } } } } float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123); float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567); float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123); float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567); float32x4_t vfpacc2x0123 = vcvtq_f32_s32(vacc2x0123); float32x4_t vfpacc2x4567 = vcvtq_f32_s32(vacc2x4567); float32x4_t vfpacc3x0123 = vcvtq_f32_s32(vacc3x0123); float32x4_t vfpacc3x4567 = vcvtq_f32_s32(vacc3x4567); const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4); vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123); vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale0123); vfpacc2x0123 = vmulq_f32(vfpacc2x0123, vscale0123); vfpacc3x0123 = vmulq_f32(vfpacc3x0123, vscale0123); const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4); vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567); vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale4567); vfpacc2x4567 = vmulq_f32(vfpacc2x4567, vscale4567); vfpacc3x4567 = vmulq_f32(vfpacc3x4567, vscale4567); const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias); vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias)); vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias)); vacc1x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc1x0123, vmagic_bias)); vacc1x4567 = 
vreinterpretq_s32_f32(vaddq_f32(vfpacc1x4567, vmagic_bias)); vacc2x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc2x0123, vmagic_bias)); vacc2x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc2x4567, vmagic_bias)); vacc3x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc3x0123, vmagic_bias)); vacc3x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc3x4567, vmagic_bias)); const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point); vacc0x0123 = vqsubq_s32(vacc0x0123, vmagic_bias_less_output_zero_point); vacc0x4567 = vqsubq_s32(vacc0x4567, vmagic_bias_less_output_zero_point); vacc1x0123 = vqsubq_s32(vacc1x0123, vmagic_bias_less_output_zero_point); vacc1x4567 = vqsubq_s32(vacc1x4567, vmagic_bias_less_output_zero_point); vacc2x0123 = vqsubq_s32(vacc2x0123, vmagic_bias_less_output_zero_point); vacc2x4567 = vqsubq_s32(vacc2x4567, vmagic_bias_less_output_zero_point); vacc3x0123 = vqsubq_s32(vacc3x0123, vmagic_bias_less_output_zero_point); vacc3x4567 = vqsubq_s32(vacc3x4567, vmagic_bias_less_output_zero_point); #if XNN_ARCH_ARM64 int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567); int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567); int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567); int16x8_t vacc3x01234567 = vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567); int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567); int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567); #else int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)); int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)); int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)); int16x8_t vacc3x01234567 = vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)); int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567)); int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567)); #endif const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neon.output_min); vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min); vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min); const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neon.output_max); vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max); vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max); if (nc >= 8) { vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567)); vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567)); vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567)); vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567)); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); a3 = (const int8_t*) ((uintptr_t) a3 - kc); nc -= 8; } else { if (nc & 4) { vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4; vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4; vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4; 
vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4; vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4); vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4); } if (nc & 2) { vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2; vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2; vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2; vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2; vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2); vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2); } if (nc & 1) { vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0); vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8); vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0); vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8); } nc = 0; } } while (nc != 0); }
24,574
61.058081
125
c
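The long if-chain in the remainder path of the mlal-lane kernel above (k >= 2, k > 2, k >= 4, ...) is an unrolled tail for the 1..7 leftover k bytes after the main 8-wide loop; each nested level consumes one more row of 8 weights, and the XNN_OOB_READS attribute permits the 8-byte activation load past the tail. Conceptually it is the loop below — a sketch with illustrative names, whereas the real code keeps accumulators in int32x4_t registers and uses vmlal_lane_s16 rather than scalar multiplies:

#include <stddef.h>
#include <stdint.h>

/* Conceptual form of the unrolled k-remainder tail for one row of A. */
static void gemm_tail_ref(size_t k_remainder,    /* 1..7 leftover bytes */
                          const int8_t* a_tail,  /* leftover activations */
                          const int8_t* b_tail,  /* k_remainder rows of 8 weights */
                          int32_t acc[8]) {
  for (size_t k = 0; k < k_remainder; k++) {
    for (size_t n = 0; n < 8; n++) {
      acc[n] += (int32_t) a_tail[k] * (int32_t) b_tail[k * 8 + n];
    }
  }
}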
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x8-minmax-fp32-neon-mlal-lane.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/neon-mlal-lane.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/common.h> #include <xnnpack/gemm.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x8__neon_mlal_lane( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride); int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { a3 = a2; c3 = c2; } do { int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4); int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4); int32x4_t vacc1x0123 = vacc0x0123; int32x4_t vacc1x4567 = vacc0x4567; int32x4_t vacc2x0123 = vacc0x0123; int32x4_t vacc2x4567 = vacc0x4567; int32x4_t vacc3x0123 = vacc0x0123; int32x4_t vacc3x4567 = vacc0x4567; size_t k = kc; while (k >= 8 * sizeof(int8_t)) { const int8x8_t va0 = vld1_s8(a0); a0 += 8; const int16x8_t vxa0 = vmovl_s8(va0); const int8x8_t va1 = vld1_s8(a1); a1 += 8; const int16x8_t vxa1 = vmovl_s8(va1); const int8x8_t va2 = vld1_s8(a2); a2 += 8; const int16x8_t vxa2 = vmovl_s8(va2); const int8x8_t va3 = vld1_s8(a3); a3 += 8; const int16x8_t vxa3 = vmovl_s8(va3); const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa1), 0); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa1), 0); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa2), 0); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa2), 0); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa3), 0); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa3), 0); const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa1), 1); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa1), 1); 
vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa2), 1); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa2), 1); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa3), 1); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa3), 1); const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa1), 2); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa1), 2); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa2), 2); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa2), 2); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa3), 2); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa3), 2); const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa1), 3); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa1), 3); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa2), 3); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa2), 3); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa3), 3); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa3), 3); const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa1), 0); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa1), 0); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa2), 0); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa2), 0); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa3), 0); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa3), 0); const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa1), 1); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c5), 
vget_high_s16(vxa1), 1); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa2), 1); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa2), 1); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa3), 1); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa3), 1); const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa1), 2); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa1), 2); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa2), 2); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa2), 2); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa3), 2); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa3), 2); const int8x8_t vb01234567c7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c7 = vmovl_s8(vb01234567c7); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa0), 3); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa0), 3); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa1), 3); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa1), 3); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa2), 3); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa2), 3); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa3), 3); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa3), 3); k -= 8 * sizeof(int8_t); } if XNN_UNLIKELY(k != 0) { const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k); const int16x8_t vxa0 = vmovl_s8(va0); const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k); const int16x8_t vxa1 = vmovl_s8(va1); const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k); const int16x8_t vxa2 = vmovl_s8(va2); const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k); const int16x8_t vxa3 = vmovl_s8(va3); const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa1), 0); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa1), 0); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa2), 0); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa2), 0); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa3), 0); vacc3x4567 = vmlal_lane_s16(vacc3x4567, 
vget_high_s16(vxb01234567c0), vget_low_s16(vxa3), 0); if (k >= 2 * sizeof(int8_t)) { const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa1), 1); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa1), 1); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa2), 1); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa2), 1); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa3), 1); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa3), 1); if (k > 2 * sizeof(int8_t)) { const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa1), 2); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa1), 2); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa2), 2); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa2), 2); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa3), 2); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa3), 2); if (k >= 4 * sizeof(int8_t)) { const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa1), 3); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa1), 3); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa2), 3); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa2), 3); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa3), 3); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa3), 3); if (k > 4 * sizeof(int8_t)) { const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa1), 0); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa1), 0); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa2), 0); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa2), 0); 
vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa3), 0); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa3), 0); if (k >= 6 * sizeof(int8_t)) { const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa1), 1); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa1), 1); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa2), 1); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa2), 1); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa3), 1); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa3), 1); if (k > 6 * sizeof(int8_t)) { const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa1), 2); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa1), 2); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa2), 2); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa2), 2); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa3), 2); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa3), 2); } } } } } } } float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123); float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567); float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123); float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567); float32x4_t vfpacc2x0123 = vcvtq_f32_s32(vacc2x0123); float32x4_t vfpacc2x4567 = vcvtq_f32_s32(vacc2x4567); float32x4_t vfpacc3x0123 = vcvtq_f32_s32(vacc3x0123); float32x4_t vfpacc3x4567 = vcvtq_f32_s32(vacc3x4567); const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4); vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123); vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale0123); vfpacc2x0123 = vmulq_f32(vfpacc2x0123, vscale0123); vfpacc3x0123 = vmulq_f32(vfpacc3x0123, vscale0123); const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4); vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567); vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale4567); vfpacc2x4567 = vmulq_f32(vfpacc2x4567, vscale4567); vfpacc3x4567 = vmulq_f32(vfpacc3x4567, vscale4567); const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias); vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias)); vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias)); vacc1x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc1x0123, vmagic_bias)); vacc1x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc1x4567, vmagic_bias)); vacc2x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc2x0123, 
vmagic_bias)); vacc2x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc2x4567, vmagic_bias)); vacc3x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc3x0123, vmagic_bias)); vacc3x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc3x4567, vmagic_bias)); const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point); vacc0x0123 = vqsubq_s32(vacc0x0123, vmagic_bias_less_output_zero_point); vacc0x4567 = vqsubq_s32(vacc0x4567, vmagic_bias_less_output_zero_point); vacc1x0123 = vqsubq_s32(vacc1x0123, vmagic_bias_less_output_zero_point); vacc1x4567 = vqsubq_s32(vacc1x4567, vmagic_bias_less_output_zero_point); vacc2x0123 = vqsubq_s32(vacc2x0123, vmagic_bias_less_output_zero_point); vacc2x4567 = vqsubq_s32(vacc2x4567, vmagic_bias_less_output_zero_point); vacc3x0123 = vqsubq_s32(vacc3x0123, vmagic_bias_less_output_zero_point); vacc3x4567 = vqsubq_s32(vacc3x4567, vmagic_bias_less_output_zero_point); #if XNN_ARCH_ARM64 int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567); int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567); int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567); int16x8_t vacc3x01234567 = vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567); int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567); int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567); #else int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)); int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)); int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)); int16x8_t vacc3x01234567 = vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)); int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567)); int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567)); #endif const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neon.output_min); vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min); vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min); const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neon.output_max); vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max); vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max); if (nc >= 8) { vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567)); vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567)); vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567)); vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567)); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); a3 = (const int8_t*) ((uintptr_t) a3 - kc); nc -= 8; } else { if (nc & 4) { vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4; vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4; vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4; vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4; vout0x01234567_1x01234567 = 
vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4); vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4); } if (nc & 2) { vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2; vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2; vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2; vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2; vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2); vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2); } if (nc & 1) { vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0); vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8); vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0); vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8); } nc = 0; } } while (nc != 0); }
24,488
61.154822
125
c
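The #if XNN_ARCH_ARM64 split in the kernel above picks vqmovn_high_s32/vqmovn_high_s16 on AArch64 and vcombine plus vqmovn elsewhere, but both branches perform the same two-stage saturating narrow from int32 accumulators to int8 outputs. A scalar sketch of one lane, with the helper name assumed for illustration:

#include <stdint.h>

/* One lane of the two-stage saturating narrow: int32 -> int16 -> int8. */
static inline int8_t sat_narrow_s32_to_s8(int32_t v) {
  const int16_t v16 = v > INT16_MAX ? INT16_MAX
                    : v < INT16_MIN ? INT16_MIN : (int16_t) v;  /* vqmovn_s32 */
  return v16 > INT8_MAX ? INT8_MAX
       : v16 < INT8_MIN ? INT8_MIN : (int8_t) v16;              /* vqmovn_s16 */
}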
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x8-minmax-fp32-neonv8-mlal-lane-prfm.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/neon-mlal-lane.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/common.h> #include <xnnpack/gemm.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/prefetch.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x8__neonv8_mlal_lane_prfm( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride); int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { a3 = a2; c3 = c2; } do { int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4); int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4); int32x4_t vacc1x0123 = vacc0x0123; int32x4_t vacc1x4567 = vacc0x4567; int32x4_t vacc2x0123 = vacc0x0123; int32x4_t vacc2x4567 = vacc0x4567; int32x4_t vacc3x0123 = vacc0x0123; int32x4_t vacc3x4567 = vacc0x4567; size_t k = kc; while (k >= 8 * sizeof(int8_t)) { const int8x8_t va0 = vld1_s8(a0); a0 += 8; const int16x8_t vxa0 = vmovl_s8(va0); const int8x8_t va1 = vld1_s8(a1); a1 += 8; const int16x8_t vxa1 = vmovl_s8(va1); const int8x8_t va2 = vld1_s8(a2); a2 += 8; const int16x8_t vxa2 = vmovl_s8(va2); const int8x8_t va3 = vld1_s8(a3); a3 += 8; const int16x8_t vxa3 = vmovl_s8(va3); const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa1), 0); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa1), 0); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa2), 0); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa2), 0); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa3), 0); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa3), 0); const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa1), 1); vacc1x4567 = 
vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa1), 1); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa2), 1); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa2), 1); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa3), 1); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa3), 1); const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa1), 2); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa1), 2); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa2), 2); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa2), 2); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa3), 2); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa3), 2); const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa1), 3); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa1), 3); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa2), 3); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa2), 3); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa3), 3); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa3), 3); xnn_prefetch_to_l1((const int8_t*) w + 448); const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa1), 0); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa1), 0); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa2), 0); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa2), 0); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa3), 0); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa3), 0); const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1); vacc1x0123 = vmlal_lane_s16(vacc1x0123, 
vget_low_s16(vxb01234567c5), vget_high_s16(vxa1), 1); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa1), 1); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa2), 1); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa2), 1); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa3), 1); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa3), 1); const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa1), 2); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa1), 2); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa2), 2); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa2), 2); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa3), 2); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa3), 2); const int8x8_t vb01234567c7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c7 = vmovl_s8(vb01234567c7); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa0), 3); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa0), 3); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa1), 3); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa1), 3); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa2), 3); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa2), 3); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa3), 3); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa3), 3); k -= 8 * sizeof(int8_t); } if XNN_UNLIKELY(k != 0) { const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k); const int16x8_t vxa0 = vmovl_s8(va0); const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k); const int16x8_t vxa1 = vmovl_s8(va1); const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k); const int16x8_t vxa2 = vmovl_s8(va2); const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k); const int16x8_t vxa3 = vmovl_s8(va3); const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa1), 0); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa1), 0); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa2), 0); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa2), 0); vacc3x0123 = 
vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa3), 0); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa3), 0); if (k >= 2 * sizeof(int8_t)) { const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa1), 1); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa1), 1); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa2), 1); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa2), 1); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa3), 1); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa3), 1); if (k > 2 * sizeof(int8_t)) { const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa1), 2); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa1), 2); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa2), 2); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa2), 2); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa3), 2); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa3), 2); if (k >= 4 * sizeof(int8_t)) { const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa1), 3); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa1), 3); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa2), 3); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa2), 3); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa3), 3); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa3), 3); if (k > 4 * sizeof(int8_t)) { const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa1), 0); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa1), 0); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c4), 
vget_high_s16(vxa2), 0); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa2), 0); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa3), 0); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa3), 0); if (k >= 6 * sizeof(int8_t)) { const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa1), 1); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa1), 1); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa2), 1); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa2), 1); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa3), 1); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa3), 1); if (k > 6 * sizeof(int8_t)) { const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa1), 2); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa1), 2); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa2), 2); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa2), 2); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa3), 2); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa3), 2); } } } } } } } float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123); float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567); float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123); float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567); float32x4_t vfpacc2x0123 = vcvtq_f32_s32(vacc2x0123); float32x4_t vfpacc2x4567 = vcvtq_f32_s32(vacc2x4567); float32x4_t vfpacc3x0123 = vcvtq_f32_s32(vacc3x0123); float32x4_t vfpacc3x4567 = vcvtq_f32_s32(vacc3x4567); const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4); vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123); vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale0123); vfpacc2x0123 = vmulq_f32(vfpacc2x0123, vscale0123); vfpacc3x0123 = vmulq_f32(vfpacc3x0123, vscale0123); const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4); vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567); vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale4567); vfpacc2x4567 = vmulq_f32(vfpacc2x4567, vscale4567); vfpacc3x4567 = vmulq_f32(vfpacc3x4567, vscale4567); vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123); vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567); vacc1x0123 = vcvtnq_s32_f32(vfpacc1x0123); vacc1x4567 = vcvtnq_s32_f32(vfpacc1x4567); vacc2x0123 = vcvtnq_s32_f32(vfpacc2x0123); vacc2x4567 = vcvtnq_s32_f32(vfpacc2x4567); vacc3x0123 = vcvtnq_s32_f32(vfpacc3x0123); vacc3x4567 = 
vcvtnq_s32_f32(vfpacc3x4567); const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point); #if XNN_ARCH_ARM64 int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567); int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567); int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567); int16x8_t vacc3x01234567 = vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567); vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point); vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point); vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point); vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point); int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567); int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567); #else int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)); int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)); int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)); int16x8_t vacc3x01234567 = vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)); vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point); vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point); vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point); vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point); int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567)); int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567)); #endif const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neonv8.output_min); vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min); vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min); const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neonv8.output_max); vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max); vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max); if (nc >= 8) { vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567)); vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567)); vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567)); vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567)); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); a3 = (const int8_t*) ((uintptr_t) a3 - kc); nc -= 8; } else { if (nc & 4) { vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4; vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4; vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4; vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4; vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4); vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4); } if (nc & 2) { vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2; vst1q_lane_u16((void*) c1, 
vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2; vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2; vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2; vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2); vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2); } if (nc & 1) { vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0); vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8); vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0); vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8); } nc = 0; } } while (nc != 0); }
24196
60.258228
112
c
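For reference, the fp32 requantization stage shared by the kernels in this file family (vcvtq_f32_s32, vmulq_f32 with per-channel scales, vcvtnq_s32_f32, saturating narrows around the output zero point, then min/max clamping) reduces to the scalar sketch below. The helper name is hypothetical, and lrintf matches vcvtnq_s32_f32 only under the default round-to-nearest-even mode and while the scaled value stays within the range of long (the NEON instruction would saturate to int32 limits instead).

#include <math.h>
#include <stdint.h>

// Hypothetical scalar model of the vectorized requantization in the kernels above.
static inline int8_t requantize_fp32_scalar(
    int32_t acc, float scale, int16_t output_zero_point,
    int8_t output_min, int8_t output_max)
{
  const float scaled = (float) acc * scale;  // vcvtq_f32_s32 + vmulq_f32
  long n = lrintf(scaled);                   // vcvtnq_s32_f32 (ties-to-even)
  if (n < INT16_MIN) n = INT16_MIN;          // vqmovn_s32 saturation
  if (n > INT16_MAX) n = INT16_MAX;
  n += output_zero_point;                    // vqaddq_s16 (stays in int16 range here)
  if (n < INT16_MIN) n = INT16_MIN;
  if (n > INT16_MAX) n = INT16_MAX;
  if (n < INT8_MIN) n = INT8_MIN;            // vqmovn_s16 saturation
  if (n > INT8_MAX) n = INT8_MAX;
  if (n < output_min) n = output_min;        // vmaxq_s8
  if (n > output_max) n = output_max;        // vminq_s8
  return (int8_t) n;
}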
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x8-minmax-fp32-neonv8-mlal-lane.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/neon-mlal-lane.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/common.h> #include <xnnpack/gemm.h> #include <xnnpack/intrinsics-polyfill.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x8__neonv8_mlal_lane( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride); int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { a3 = a2; c3 = c2; } do { int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4); int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4); int32x4_t vacc1x0123 = vacc0x0123; int32x4_t vacc1x4567 = vacc0x4567; int32x4_t vacc2x0123 = vacc0x0123; int32x4_t vacc2x4567 = vacc0x4567; int32x4_t vacc3x0123 = vacc0x0123; int32x4_t vacc3x4567 = vacc0x4567; size_t k = kc; while (k >= 8 * sizeof(int8_t)) { const int8x8_t va0 = vld1_s8(a0); a0 += 8; const int16x8_t vxa0 = vmovl_s8(va0); const int8x8_t va1 = vld1_s8(a1); a1 += 8; const int16x8_t vxa1 = vmovl_s8(va1); const int8x8_t va2 = vld1_s8(a2); a2 += 8; const int16x8_t vxa2 = vmovl_s8(va2); const int8x8_t va3 = vld1_s8(a3); a3 += 8; const int16x8_t vxa3 = vmovl_s8(va3); const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa1), 0); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa1), 0); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa2), 0); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa2), 0); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa3), 0); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa3), 0); const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa1), 1); vacc1x4567 = vmlal_lane_s16(vacc1x4567, 
vget_high_s16(vxb01234567c1), vget_low_s16(vxa1), 1); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa2), 1); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa2), 1); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa3), 1); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa3), 1); const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa1), 2); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa1), 2); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa2), 2); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa2), 2); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa3), 2); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa3), 2); const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa1), 3); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa1), 3); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa2), 3); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa2), 3); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa3), 3); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa3), 3); const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa1), 0); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa1), 0); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa2), 0); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa2), 0); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa3), 0); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa3), 0); const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa1), 1); vacc1x4567 = 
vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa1), 1); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa2), 1); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa2), 1); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa3), 1); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa3), 1); const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa1), 2); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa1), 2); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa2), 2); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa2), 2); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa3), 2); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa3), 2); const int8x8_t vb01234567c7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c7 = vmovl_s8(vb01234567c7); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa0), 3); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa0), 3); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa1), 3); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa1), 3); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa2), 3); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa2), 3); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa3), 3); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa3), 3); k -= 8 * sizeof(int8_t); } if XNN_UNLIKELY(k != 0) { const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k); const int16x8_t vxa0 = vmovl_s8(va0); const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k); const int16x8_t vxa1 = vmovl_s8(va1); const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k); const int16x8_t vxa2 = vmovl_s8(va2); const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k); const int16x8_t vxa3 = vmovl_s8(va3); const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa1), 0); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa1), 0); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa2), 0); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa2), 0); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c0), 
vget_low_s16(vxa3), 0); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa3), 0); if (k >= 2 * sizeof(int8_t)) { const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa1), 1); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa1), 1); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa2), 1); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa2), 1); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa3), 1); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa3), 1); if (k > 2 * sizeof(int8_t)) { const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa1), 2); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa1), 2); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa2), 2); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa2), 2); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa3), 2); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa3), 2); if (k >= 4 * sizeof(int8_t)) { const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa1), 3); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa1), 3); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa2), 3); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa2), 3); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa3), 3); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa3), 3); if (k > 4 * sizeof(int8_t)) { const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa1), 0); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa1), 0); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa2), 0); vacc2x4567 = 
vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa2), 0); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa3), 0); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa3), 0); if (k >= 6 * sizeof(int8_t)) { const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa1), 1); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa1), 1); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa2), 1); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa2), 1); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa3), 1); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa3), 1); if (k > 6 * sizeof(int8_t)) { const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2); vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa1), 2); vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa1), 2); vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa2), 2); vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa2), 2); vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa3), 2); vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa3), 2); } } } } } } } float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123); float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567); float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123); float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567); float32x4_t vfpacc2x0123 = vcvtq_f32_s32(vacc2x0123); float32x4_t vfpacc2x4567 = vcvtq_f32_s32(vacc2x4567); float32x4_t vfpacc3x0123 = vcvtq_f32_s32(vacc3x0123); float32x4_t vfpacc3x4567 = vcvtq_f32_s32(vacc3x4567); const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4); vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123); vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale0123); vfpacc2x0123 = vmulq_f32(vfpacc2x0123, vscale0123); vfpacc3x0123 = vmulq_f32(vfpacc3x0123, vscale0123); const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4); vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567); vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale4567); vfpacc2x4567 = vmulq_f32(vfpacc2x4567, vscale4567); vfpacc3x4567 = vmulq_f32(vfpacc3x4567, vscale4567); vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123); vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567); vacc1x0123 = vcvtnq_s32_f32(vfpacc1x0123); vacc1x4567 = vcvtnq_s32_f32(vfpacc1x4567); vacc2x0123 = vcvtnq_s32_f32(vfpacc2x0123); vacc2x4567 = vcvtnq_s32_f32(vfpacc2x4567); vacc3x0123 = vcvtnq_s32_f32(vfpacc3x0123); vacc3x4567 = vcvtnq_s32_f32(vfpacc3x4567); const int16x8_t 
voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point); #if XNN_ARCH_ARM64 int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567); int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567); int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567); int16x8_t vacc3x01234567 = vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567); vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point); vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point); vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point); vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point); int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567); int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567); #else int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)); int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)); int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)); int16x8_t vacc3x01234567 = vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)); vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point); vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point); vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point); vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point); int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567)); int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567)); #endif const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neonv8.output_min); vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min); vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min); const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neonv8.output_max); vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max); vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max); if (nc >= 8) { vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567)); vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567)); vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567)); vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567)); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); a3 = (const int8_t*) ((uintptr_t) a3 - kc); nc -= 8; } else { if (nc & 4) { vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4; vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4; vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4; vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4; vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4); vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4); } if (nc & 2) { vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2; vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2; 
vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2; vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2; vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2); vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2); } if (nc & 1) { vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0); vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8); vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0); vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8); } nc = 0; } } while (nc != 0); }
24110
60.351145
112
c
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-4x8c4-minmax-fp32-neondot.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/c4-neondot.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/gemm.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/math.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_4x8c4__neondot( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 4 * sizeof(int8_t)); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride); int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { a3 = a2; c3 = c2; } // Loop over groups of 8 columns. do { // Initialize accumulators with bias. 8 bias values are loaded from the // weight matrix at the start of the group of 8 columns. int32x4_t vacc0x0123 = vld1q_s32(w); w = (const int32_t*) w + 4; int32x4_t vacc0x4567 = vld1q_s32(w); w = (const int32_t*) w + 4; int32x4_t vacc1x0123 = vacc0x0123; int32x4_t vacc1x4567 = vacc0x4567; int32x4_t vacc2x0123 = vacc0x0123; int32x4_t vacc2x4567 = vacc0x4567; int32x4_t vacc3x0123 = vacc0x0123; int32x4_t vacc3x4567 = vacc0x4567; // Inner accumulation loop along the K dimension for this group of 8 columns. size_t k = kc; // 2x partially unrolled loop that loads 8 bytes at a time. while (k >= 8 * sizeof(int8_t)) { // Load a 4x8 block of activations. const int8x8_t va0x01234567 = vld1_s8(a0); a0 += 8; const int8x8_t va1x01234567 = vld1_s8(a1); a1 += 8; const int8x8_t va2x01234567 = vld1_s8(a2); a2 += 8; const int8x8_t va3x01234567 = vld1_s8(a3); a3 += 8; // Load an 8x8 block of weights. const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16; const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16; const int8x16_t vb4567x0123 = vld1q_s8(w); w = (const int8_t*) w + 16; const int8x16_t vb4567x4567 = vld1q_s8(w); w = (const int8_t*) w + 16; // Multiply-accumulate: 4x8 * 8x8 --> 4x8. 
vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0); vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0); vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb0123x0123, va1x01234567, 0); vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb0123x4567, va1x01234567, 0); vacc2x0123 = vdotq_lane_s32(vacc2x0123, vb0123x0123, va2x01234567, 0); vacc2x4567 = vdotq_lane_s32(vacc2x4567, vb0123x4567, va2x01234567, 0); vacc3x0123 = vdotq_lane_s32(vacc3x0123, vb0123x0123, va3x01234567, 0); vacc3x4567 = vdotq_lane_s32(vacc3x4567, vb0123x4567, va3x01234567, 0); vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb4567x0123, va0x01234567, 1); vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb4567x4567, va0x01234567, 1); vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb4567x0123, va1x01234567, 1); vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb4567x4567, va1x01234567, 1); vacc2x0123 = vdotq_lane_s32(vacc2x0123, vb4567x0123, va2x01234567, 1); vacc2x4567 = vdotq_lane_s32(vacc2x4567, vb4567x4567, va2x01234567, 1); vacc3x0123 = vdotq_lane_s32(vacc3x0123, vb4567x0123, va3x01234567, 1); vacc3x4567 = vdotq_lane_s32(vacc3x4567, vb4567x4567, va3x01234567, 1); k -= 8 * sizeof(int8_t); } // Handle up to 4 final positions of `k` if XNN_UNLIKELY(k != 0) { // Load a 4x4 block of activations. const int8x8_t va0x01234567 = vld1_s8(a0); a0 += 4; const int8x8_t va1x01234567 = vld1_s8(a1); a1 += 4; const int8x8_t va2x01234567 = vld1_s8(a2); a2 += 4; const int8x8_t va3x01234567 = vld1_s8(a3); a3 += 4; // Load a 4x8 block of weights. const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16; const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16; // Multiply-accumulate: 4x4 * 4x8 --> 4x8. vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0); vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0); vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb0123x0123, va1x01234567, 0); vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb0123x4567, va1x01234567, 0); vacc2x0123 = vdotq_lane_s32(vacc2x0123, vb0123x0123, va2x01234567, 0); vacc2x4567 = vdotq_lane_s32(vacc2x4567, vb0123x4567, va2x01234567, 0); vacc3x0123 = vdotq_lane_s32(vacc3x0123, vb0123x0123, va3x01234567, 0); vacc3x4567 = vdotq_lane_s32(vacc3x4567, vb0123x4567, va3x01234567, 0); } float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123); float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567); float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123); float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567); float32x4_t vfpacc2x0123 = vcvtq_f32_s32(vacc2x0123); float32x4_t vfpacc2x4567 = vcvtq_f32_s32(vacc2x4567); float32x4_t vfpacc3x0123 = vcvtq_f32_s32(vacc3x0123); float32x4_t vfpacc3x4567 = vcvtq_f32_s32(vacc3x4567); const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const float*) w + 4; vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123); vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale0123); vfpacc2x0123 = vmulq_f32(vfpacc2x0123, vscale0123); vfpacc3x0123 = vmulq_f32(vfpacc3x0123, vscale0123); const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const float*) w + 4; vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567); vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale4567); vfpacc2x4567 = vmulq_f32(vfpacc2x4567, vscale4567); vfpacc3x4567 = vmulq_f32(vfpacc3x4567, vscale4567); vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123); vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567); vacc1x0123 = vcvtnq_s32_f32(vfpacc1x0123); vacc1x4567 = vcvtnq_s32_f32(vfpacc1x4567); vacc2x0123 = vcvtnq_s32_f32(vfpacc2x0123); vacc2x4567 = vcvtnq_s32_f32(vfpacc2x4567); vacc3x0123 = 
vcvtnq_s32_f32(vfpacc3x0123); vacc3x4567 = vcvtnq_s32_f32(vfpacc3x4567); const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point); #if XNN_ARCH_ARM64 const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point); const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point); const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point); const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point); int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567); int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567); #else const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point); const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point); const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point); const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point); int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567)); int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567)); #endif const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neonv8.output_min); const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neonv8.output_max); vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min); vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min); vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max); vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max); if (nc >= 8) { // Main case where the 8 columns fit in the destination. vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567)); vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567)); vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567)); vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567)); // Advance to the next 8 columns. c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); a3 = (const int8_t*) ((uintptr_t) a3 - kc); nc -= 8; } else { // Final case where not all of the 8 columns fit in the destination. 
if (nc & 4) { vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4; vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4; vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4; vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4; vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4); vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4); } if (nc & 2) { vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2; vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2; vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2; vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2; vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2); vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2); } if (nc & 1) { vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0); vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8); vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0); vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8); } nc = 0; } } while (nc != 0); }
11471
47.201681
132
c
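Each vdotq_lane_s32 in the kernel above adds four int8 products into every 32-bit accumulator lane; the lane argument picks which 4-byte group of the activation register is used. A scalar model of that semantic (illustrative only):

#include <stdint.h>

// Scalar model of vdotq_lane_s32 as used above: acc holds four 32-bit lanes,
// b holds sixteen signed weight bytes (four per output lane), and lane selects
// the 4-byte activation group (0 or 1) within the 8-byte activation vector.
static void sdot_lane_scalar(int32_t acc[4], const int8_t b[16],
                             const int8_t a[8], int lane)
{
  const int8_t* a4 = a + 4 * lane;
  for (int i = 0; i < 4; i++) {
    for (int j = 0; j < 4; j++) {
      acc[i] += (int32_t) b[4 * i + j] * (int32_t) a4[j];
    }
  }
}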
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-6x8c4-minmax-fp32-neondot.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/c4-neondot.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/gemm.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/math.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_6x8c4__neondot( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 6); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 4 * sizeof(int8_t)); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride); int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr < 4) { a3 = a2; c3 = c2; } const int8_t* a4 = (const int8_t*) ((uintptr_t) a3 + a_stride); int8_t* c4 = (int8_t*) ((uintptr_t) c3 + cm_stride); if XNN_UNPREDICTABLE(mr <= 4) { a4 = a3; c4 = c3; } const int8_t* a5 = (const int8_t*) ((uintptr_t) a4 + a_stride); int8_t* c5 = (int8_t*) ((uintptr_t) c4 + cm_stride); if XNN_UNPREDICTABLE(mr != 6) { a5 = a4; c5 = c4; } // Loop over groups of 8 columns. do { // Initialize accumulators with bias. 8 bias values are loaded from the // weight matrix at the start of the group of 8 columns. int32x4_t vacc0x0123 = vld1q_s32(w); w = (const int32_t*) w + 4; int32x4_t vacc0x4567 = vld1q_s32(w); w = (const int32_t*) w + 4; int32x4_t vacc1x0123 = vacc0x0123; int32x4_t vacc1x4567 = vacc0x4567; int32x4_t vacc2x0123 = vacc0x0123; int32x4_t vacc2x4567 = vacc0x4567; int32x4_t vacc3x0123 = vacc0x0123; int32x4_t vacc3x4567 = vacc0x4567; int32x4_t vacc4x0123 = vacc0x0123; int32x4_t vacc4x4567 = vacc0x4567; int32x4_t vacc5x0123 = vacc0x0123; int32x4_t vacc5x4567 = vacc0x4567; // Inner accumulation loop along the K dimension for this group of 8 columns. size_t k = kc; // 2x partially unrolled loop that loads 8 bytes at a time. while (k >= 8 * sizeof(int8_t)) { // Load a 6x8 block of activations. const int8x8_t va0x01234567 = vld1_s8(a0); a0 += 8; const int8x8_t va1x01234567 = vld1_s8(a1); a1 += 8; const int8x8_t va2x01234567 = vld1_s8(a2); a2 += 8; const int8x8_t va3x01234567 = vld1_s8(a3); a3 += 8; const int8x8_t va4x01234567 = vld1_s8(a4); a4 += 8; const int8x8_t va5x01234567 = vld1_s8(a5); a5 += 8; // Load an 8x8 block of weights. const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16; const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16; const int8x16_t vb4567x0123 = vld1q_s8(w); w = (const int8_t*) w + 16; const int8x16_t vb4567x4567 = vld1q_s8(w); w = (const int8_t*) w + 16; // Multiply-accumulate: 6x8 * 8x8 --> 6x8. 
vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0); vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0); vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb0123x0123, va1x01234567, 0); vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb0123x4567, va1x01234567, 0); vacc2x0123 = vdotq_lane_s32(vacc2x0123, vb0123x0123, va2x01234567, 0); vacc2x4567 = vdotq_lane_s32(vacc2x4567, vb0123x4567, va2x01234567, 0); vacc3x0123 = vdotq_lane_s32(vacc3x0123, vb0123x0123, va3x01234567, 0); vacc3x4567 = vdotq_lane_s32(vacc3x4567, vb0123x4567, va3x01234567, 0); vacc4x0123 = vdotq_lane_s32(vacc4x0123, vb0123x0123, va4x01234567, 0); vacc4x4567 = vdotq_lane_s32(vacc4x4567, vb0123x4567, va4x01234567, 0); vacc5x0123 = vdotq_lane_s32(vacc5x0123, vb0123x0123, va5x01234567, 0); vacc5x4567 = vdotq_lane_s32(vacc5x4567, vb0123x4567, va5x01234567, 0); vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb4567x0123, va0x01234567, 1); vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb4567x4567, va0x01234567, 1); vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb4567x0123, va1x01234567, 1); vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb4567x4567, va1x01234567, 1); vacc2x0123 = vdotq_lane_s32(vacc2x0123, vb4567x0123, va2x01234567, 1); vacc2x4567 = vdotq_lane_s32(vacc2x4567, vb4567x4567, va2x01234567, 1); vacc3x0123 = vdotq_lane_s32(vacc3x0123, vb4567x0123, va3x01234567, 1); vacc3x4567 = vdotq_lane_s32(vacc3x4567, vb4567x4567, va3x01234567, 1); vacc4x0123 = vdotq_lane_s32(vacc4x0123, vb4567x0123, va4x01234567, 1); vacc4x4567 = vdotq_lane_s32(vacc4x4567, vb4567x4567, va4x01234567, 1); vacc5x0123 = vdotq_lane_s32(vacc5x0123, vb4567x0123, va5x01234567, 1); vacc5x4567 = vdotq_lane_s32(vacc5x4567, vb4567x4567, va5x01234567, 1); k -= 8 * sizeof(int8_t); } // Handle up to 4 final positions of `k` if XNN_UNLIKELY(k != 0) { // Load a 6x4 block of activations. const int8x8_t va0x01234567 = vld1_s8(a0); a0 += 4; const int8x8_t va1x01234567 = vld1_s8(a1); a1 += 4; const int8x8_t va2x01234567 = vld1_s8(a2); a2 += 4; const int8x8_t va3x01234567 = vld1_s8(a3); a3 += 4; const int8x8_t va4x01234567 = vld1_s8(a4); a4 += 4; const int8x8_t va5x01234567 = vld1_s8(a5); a5 += 4; // Load a 4x8 block of weights. const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16; const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16; // Multiply-accumulate: 6x4 * 4x8 --> 6x8. 
vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0); vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0); vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb0123x0123, va1x01234567, 0); vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb0123x4567, va1x01234567, 0); vacc2x0123 = vdotq_lane_s32(vacc2x0123, vb0123x0123, va2x01234567, 0); vacc2x4567 = vdotq_lane_s32(vacc2x4567, vb0123x4567, va2x01234567, 0); vacc3x0123 = vdotq_lane_s32(vacc3x0123, vb0123x0123, va3x01234567, 0); vacc3x4567 = vdotq_lane_s32(vacc3x4567, vb0123x4567, va3x01234567, 0); vacc4x0123 = vdotq_lane_s32(vacc4x0123, vb0123x0123, va4x01234567, 0); vacc4x4567 = vdotq_lane_s32(vacc4x4567, vb0123x4567, va4x01234567, 0); vacc5x0123 = vdotq_lane_s32(vacc5x0123, vb0123x0123, va5x01234567, 0); vacc5x4567 = vdotq_lane_s32(vacc5x4567, vb0123x4567, va5x01234567, 0); } float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123); float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567); float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123); float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567); float32x4_t vfpacc2x0123 = vcvtq_f32_s32(vacc2x0123); float32x4_t vfpacc2x4567 = vcvtq_f32_s32(vacc2x4567); float32x4_t vfpacc3x0123 = vcvtq_f32_s32(vacc3x0123); float32x4_t vfpacc3x4567 = vcvtq_f32_s32(vacc3x4567); float32x4_t vfpacc4x0123 = vcvtq_f32_s32(vacc4x0123); float32x4_t vfpacc4x4567 = vcvtq_f32_s32(vacc4x4567); float32x4_t vfpacc5x0123 = vcvtq_f32_s32(vacc5x0123); float32x4_t vfpacc5x4567 = vcvtq_f32_s32(vacc5x4567); const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const float*) w + 4; vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123); vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale0123); vfpacc2x0123 = vmulq_f32(vfpacc2x0123, vscale0123); vfpacc3x0123 = vmulq_f32(vfpacc3x0123, vscale0123); vfpacc4x0123 = vmulq_f32(vfpacc4x0123, vscale0123); vfpacc5x0123 = vmulq_f32(vfpacc5x0123, vscale0123); const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const float*) w + 4; vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567); vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale4567); vfpacc2x4567 = vmulq_f32(vfpacc2x4567, vscale4567); vfpacc3x4567 = vmulq_f32(vfpacc3x4567, vscale4567); vfpacc4x4567 = vmulq_f32(vfpacc4x4567, vscale4567); vfpacc5x4567 = vmulq_f32(vfpacc5x4567, vscale4567); vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123); vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567); vacc1x0123 = vcvtnq_s32_f32(vfpacc1x0123); vacc1x4567 = vcvtnq_s32_f32(vfpacc1x4567); vacc2x0123 = vcvtnq_s32_f32(vfpacc2x0123); vacc2x4567 = vcvtnq_s32_f32(vfpacc2x4567); vacc3x0123 = vcvtnq_s32_f32(vfpacc3x0123); vacc3x4567 = vcvtnq_s32_f32(vfpacc3x4567); vacc4x0123 = vcvtnq_s32_f32(vfpacc4x0123); vacc4x4567 = vcvtnq_s32_f32(vfpacc4x4567); vacc5x0123 = vcvtnq_s32_f32(vfpacc5x0123); vacc5x4567 = vcvtnq_s32_f32(vfpacc5x4567); const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point); #if XNN_ARCH_ARM64 const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point); const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point); const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point); const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point); const int16x8_t vacc4x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc4x0123), vacc4x4567), voutput_zero_point); const int16x8_t vacc5x01234567 = 
vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc5x0123), vacc5x4567), voutput_zero_point); int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567); int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567); int8x16_t vout4x01234567_5x01234567 = vqmovn_high_s16(vqmovn_s16(vacc4x01234567), vacc5x01234567); #else const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point); const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point); const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point); const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point); const int16x8_t vacc4x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc4x0123), vqmovn_s32(vacc4x4567)), voutput_zero_point); const int16x8_t vacc5x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc5x0123), vqmovn_s32(vacc5x4567)), voutput_zero_point); int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567)); int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567)); int8x16_t vout4x01234567_5x01234567 = vcombine_s8(vqmovn_s16(vacc4x01234567), vqmovn_s16(vacc5x01234567)); #endif const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neonv8.output_min); const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neonv8.output_max); vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min); vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min); vout4x01234567_5x01234567 = vmaxq_s8(vout4x01234567_5x01234567, voutput_min); vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max); vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max); vout4x01234567_5x01234567 = vminq_s8(vout4x01234567_5x01234567, voutput_max); if (nc >= 8) { // Main case where the 8 columns fit in the destination. vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567)); vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567)); vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567)); vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567)); vst1_s8(c4 + 0, vget_low_s8(vout4x01234567_5x01234567)); vst1_s8(c5 + 0, vget_high_s8(vout4x01234567_5x01234567)); // Advance to the next 8 columns. c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); c4 = (int8_t*) ((uintptr_t) c4 + cn_stride); c5 = (int8_t*) ((uintptr_t) c5 + cn_stride); a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); a3 = (const int8_t*) ((uintptr_t) a3 - kc); a4 = (const int8_t*) ((uintptr_t) a4 - kc); a5 = (const int8_t*) ((uintptr_t) a5 - kc); nc -= 8; } else { // Final case where not all of the 8 columns fit in the destination. 
if (nc & 4) { vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4; vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4; vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4; vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4; vst1q_lane_u32((void*) c4, vreinterpretq_u32_s8(vout4x01234567_5x01234567), 0); c4 += 4; vst1q_lane_u32((void*) c5, vreinterpretq_u32_s8(vout4x01234567_5x01234567), 2); c5 += 4; vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4); vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4); vout4x01234567_5x01234567 = vextq_s8(vout4x01234567_5x01234567, vout4x01234567_5x01234567, 4); } if (nc & 2) { vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2; vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2; vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2; vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2; vst1q_lane_u16((void*) c4, vreinterpretq_u16_s8(vout4x01234567_5x01234567), 0); c4 += 2; vst1q_lane_u16((void*) c5, vreinterpretq_u16_s8(vout4x01234567_5x01234567), 4); c5 += 2; vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2); vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2); vout4x01234567_5x01234567 = vextq_s8(vout4x01234567_5x01234567, vout4x01234567_5x01234567, 2); } if (nc & 1) { vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0); vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8); vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0); vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8); vst1q_lane_s8(c4, vout4x01234567_5x01234567, 0); vst1q_lane_s8(c5, vout4x01234567_5x01234567, 8); } nc = 0; } } while (nc != 0); }
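The requantization pattern that closes the kernel above is shared by all of these fp32 neonv8 variants: each int32 accumulator is converted to float, multiplied by a per-channel scale read from the packed weights, rounded back to int32 with round-to-nearest-even (vcvtnq_s32_f32), offset by the output zero point through saturating adds during narrowing, and finally clamped to [output_min, output_max]. A minimal scalar sketch of that pipeline for a single lane, with illustrative names that are not part of XNNPACK's API:

#include <math.h>
#include <stdint.h>

/* Scalar model of the neonv8 requantization above:
   vcvtq_f32_s32 + vmulq_f32 + vcvtnq_s32_f32 + vqaddq_s16 + vmaxq_s8/vminq_s8.
   The intermediate int16 saturation of vqmovn/vqaddq_s16 is folded into the
   final clamp here, which is equivalent because output_min/output_max are int8. */
static inline int8_t requantize_fp32_ref(int32_t acc, float scale,
                                         int16_t output_zero_point,
                                         int8_t output_min, int8_t output_max) {
  const float fpacc = (float) acc * scale;   /* vcvtq_f32_s32, vmulq_f32 */
  long n = lrintf(fpacc);                    /* vcvtnq_s32_f32, assuming the default
                                                round-to-nearest rounding mode */
  n += output_zero_point;                    /* vqaddq_s16 after narrowing */
  if (n < output_min) n = output_min;        /* vmaxq_s8 */
  if (n > output_max) n = output_max;        /* vminq_s8 */
  return (int8_t) n;
}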
15,729
50.743421
132
c
XNNPACK
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-8x8c4-minmax-fp32-neondot.c
// Auto-generated file. Do not edit! // Template: src/qs8-gemm/c4-neondot.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/gemm.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/math.h> void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_8x8c4__neondot( size_t mr, size_t nc, size_t kc, const int8_t* restrict a, size_t a_stride, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 8); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 4 * sizeof(int8_t)); const int8_t* a0 = a; int8_t* c0 = c; const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride); int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride); int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride); int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr < 4) { a3 = a2; c3 = c2; } const int8_t* a4 = (const int8_t*) ((uintptr_t) a3 + a_stride); int8_t* c4 = (int8_t*) ((uintptr_t) c3 + cm_stride); if XNN_UNPREDICTABLE(mr <= 4) { a4 = a3; c4 = c3; } const int8_t* a5 = (const int8_t*) ((uintptr_t) a4 + a_stride); int8_t* c5 = (int8_t*) ((uintptr_t) c4 + cm_stride); if XNN_UNPREDICTABLE(mr < 6) { a5 = a4; c5 = c4; } const int8_t* a6 = (const int8_t*) ((uintptr_t) a5 + a_stride); int8_t* c6 = (int8_t*) ((uintptr_t) c5 + cm_stride); if XNN_UNPREDICTABLE(mr <= 6) { a6 = a5; c6 = c5; } const int8_t* a7 = (const int8_t*) ((uintptr_t) a6 + a_stride); int8_t* c7 = (int8_t*) ((uintptr_t) c6 + cm_stride); if XNN_UNPREDICTABLE(mr != 8) { a7 = a6; c7 = c6; } // Loop over groups of 8 columns. do { // Initialize accumulators with bias. 8 bias values are loaded from the // weight matrix, at the start of the group of 8 columns. int32x4_t vacc0x0123 = vld1q_s32(w); w = (const int32_t*) w + 4; int32x4_t vacc0x4567 = vld1q_s32(w); w = (const int32_t*) w + 4; int32x4_t vacc1x0123 = vacc0x0123; int32x4_t vacc1x4567 = vacc0x4567; int32x4_t vacc2x0123 = vacc0x0123; int32x4_t vacc2x4567 = vacc0x4567; int32x4_t vacc3x0123 = vacc0x0123; int32x4_t vacc3x4567 = vacc0x4567; int32x4_t vacc4x0123 = vacc0x0123; int32x4_t vacc4x4567 = vacc0x4567; int32x4_t vacc5x0123 = vacc0x0123; int32x4_t vacc5x4567 = vacc0x4567; int32x4_t vacc6x0123 = vacc0x0123; int32x4_t vacc6x4567 = vacc0x4567; int32x4_t vacc7x0123 = vacc0x0123; int32x4_t vacc7x4567 = vacc0x4567; // Inner accumulation loop over KC for this group of 8 columns. size_t k = kc; // 2x partially unrolled loop to load 8 bytes at a time. while (k >= 8 * sizeof(int8_t)) { // Load an 8x8 block of activations.
const int8x8_t va0x01234567 = vld1_s8(a0); a0 += 8; const int8x8_t va1x01234567 = vld1_s8(a1); a1 += 8; const int8x8_t va2x01234567 = vld1_s8(a2); a2 += 8; const int8x8_t va3x01234567 = vld1_s8(a3); a3 += 8; const int8x8_t va4x01234567 = vld1_s8(a4); a4 += 8; const int8x8_t va5x01234567 = vld1_s8(a5); a5 += 8; const int8x8_t va6x01234567 = vld1_s8(a6); a6 += 8; const int8x8_t va7x01234567 = vld1_s8(a7); a7 += 8; // Load an 8x8 block of weights. const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16; const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16; const int8x16_t vb4567x0123 = vld1q_s8(w); w = (const int8_t*) w + 16; const int8x16_t vb4567x4567 = vld1q_s8(w); w = (const int8_t*) w + 16; // Multiply-accumulate: 8x8 * 8x8 --> 8x8. vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0); vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0); vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb0123x0123, va1x01234567, 0); vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb0123x4567, va1x01234567, 0); vacc2x0123 = vdotq_lane_s32(vacc2x0123, vb0123x0123, va2x01234567, 0); vacc2x4567 = vdotq_lane_s32(vacc2x4567, vb0123x4567, va2x01234567, 0); vacc3x0123 = vdotq_lane_s32(vacc3x0123, vb0123x0123, va3x01234567, 0); vacc3x4567 = vdotq_lane_s32(vacc3x4567, vb0123x4567, va3x01234567, 0); vacc4x0123 = vdotq_lane_s32(vacc4x0123, vb0123x0123, va4x01234567, 0); vacc4x4567 = vdotq_lane_s32(vacc4x4567, vb0123x4567, va4x01234567, 0); vacc5x0123 = vdotq_lane_s32(vacc5x0123, vb0123x0123, va5x01234567, 0); vacc5x4567 = vdotq_lane_s32(vacc5x4567, vb0123x4567, va5x01234567, 0); vacc6x0123 = vdotq_lane_s32(vacc6x0123, vb0123x0123, va6x01234567, 0); vacc6x4567 = vdotq_lane_s32(vacc6x4567, vb0123x4567, va6x01234567, 0); vacc7x0123 = vdotq_lane_s32(vacc7x0123, vb0123x0123, va7x01234567, 0); vacc7x4567 = vdotq_lane_s32(vacc7x4567, vb0123x4567, va7x01234567, 0); vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb4567x0123, va0x01234567, 1); vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb4567x4567, va0x01234567, 1); vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb4567x0123, va1x01234567, 1); vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb4567x4567, va1x01234567, 1); vacc2x0123 = vdotq_lane_s32(vacc2x0123, vb4567x0123, va2x01234567, 1); vacc2x4567 = vdotq_lane_s32(vacc2x4567, vb4567x4567, va2x01234567, 1); vacc3x0123 = vdotq_lane_s32(vacc3x0123, vb4567x0123, va3x01234567, 1); vacc3x4567 = vdotq_lane_s32(vacc3x4567, vb4567x4567, va3x01234567, 1); vacc4x0123 = vdotq_lane_s32(vacc4x0123, vb4567x0123, va4x01234567, 1); vacc4x4567 = vdotq_lane_s32(vacc4x4567, vb4567x4567, va4x01234567, 1); vacc5x0123 = vdotq_lane_s32(vacc5x0123, vb4567x0123, va5x01234567, 1); vacc5x4567 = vdotq_lane_s32(vacc5x4567, vb4567x4567, va5x01234567, 1); vacc6x0123 = vdotq_lane_s32(vacc6x0123, vb4567x0123, va6x01234567, 1); vacc6x4567 = vdotq_lane_s32(vacc6x4567, vb4567x4567, va6x01234567, 1); vacc7x0123 = vdotq_lane_s32(vacc7x0123, vb4567x0123, va7x01234567, 1); vacc7x4567 = vdotq_lane_s32(vacc7x4567, vb4567x4567, va7x01234567, 1); k -= 8 * sizeof(int8_t); } // Handle up to 4 final positions of `k` if XNN_UNLIKELY(k != 0) { // Load an 8x4 block of activations.
const int8x8_t va0x01234567 = vld1_s8(a0); a0 += 4; const int8x8_t va1x01234567 = vld1_s8(a1); a1 += 4; const int8x8_t va2x01234567 = vld1_s8(a2); a2 += 4; const int8x8_t va3x01234567 = vld1_s8(a3); a3 += 4; const int8x8_t va4x01234567 = vld1_s8(a4); a4 += 4; const int8x8_t va5x01234567 = vld1_s8(a5); a5 += 4; const int8x8_t va6x01234567 = vld1_s8(a6); a6 += 4; const int8x8_t va7x01234567 = vld1_s8(a7); a7 += 4; // Load a 4x8 block of weights. const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16; const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16; // Multiply-accumulate: 8x4 * 4x8 --> 8x8. vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0); vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0); vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb0123x0123, va1x01234567, 0); vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb0123x4567, va1x01234567, 0); vacc2x0123 = vdotq_lane_s32(vacc2x0123, vb0123x0123, va2x01234567, 0); vacc2x4567 = vdotq_lane_s32(vacc2x4567, vb0123x4567, va2x01234567, 0); vacc3x0123 = vdotq_lane_s32(vacc3x0123, vb0123x0123, va3x01234567, 0); vacc3x4567 = vdotq_lane_s32(vacc3x4567, vb0123x4567, va3x01234567, 0); vacc4x0123 = vdotq_lane_s32(vacc4x0123, vb0123x0123, va4x01234567, 0); vacc4x4567 = vdotq_lane_s32(vacc4x4567, vb0123x4567, va4x01234567, 0); vacc5x0123 = vdotq_lane_s32(vacc5x0123, vb0123x0123, va5x01234567, 0); vacc5x4567 = vdotq_lane_s32(vacc5x4567, vb0123x4567, va5x01234567, 0); vacc6x0123 = vdotq_lane_s32(vacc6x0123, vb0123x0123, va6x01234567, 0); vacc6x4567 = vdotq_lane_s32(vacc6x4567, vb0123x4567, va6x01234567, 0); vacc7x0123 = vdotq_lane_s32(vacc7x0123, vb0123x0123, va7x01234567, 0); vacc7x4567 = vdotq_lane_s32(vacc7x4567, vb0123x4567, va7x01234567, 0); } float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123); float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567); float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123); float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567); float32x4_t vfpacc2x0123 = vcvtq_f32_s32(vacc2x0123); float32x4_t vfpacc2x4567 = vcvtq_f32_s32(vacc2x4567); float32x4_t vfpacc3x0123 = vcvtq_f32_s32(vacc3x0123); float32x4_t vfpacc3x4567 = vcvtq_f32_s32(vacc3x4567); float32x4_t vfpacc4x0123 = vcvtq_f32_s32(vacc4x0123); float32x4_t vfpacc4x4567 = vcvtq_f32_s32(vacc4x4567); float32x4_t vfpacc5x0123 = vcvtq_f32_s32(vacc5x0123); float32x4_t vfpacc5x4567 = vcvtq_f32_s32(vacc5x4567); float32x4_t vfpacc6x0123 = vcvtq_f32_s32(vacc6x0123); float32x4_t vfpacc6x4567 = vcvtq_f32_s32(vacc6x4567); float32x4_t vfpacc7x0123 = vcvtq_f32_s32(vacc7x0123); float32x4_t vfpacc7x4567 = vcvtq_f32_s32(vacc7x4567); const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const float*) w + 4; vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123); vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale0123); vfpacc2x0123 = vmulq_f32(vfpacc2x0123, vscale0123); vfpacc3x0123 = vmulq_f32(vfpacc3x0123, vscale0123); vfpacc4x0123 = vmulq_f32(vfpacc4x0123, vscale0123); vfpacc5x0123 = vmulq_f32(vfpacc5x0123, vscale0123); vfpacc6x0123 = vmulq_f32(vfpacc6x0123, vscale0123); vfpacc7x0123 = vmulq_f32(vfpacc7x0123, vscale0123); const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const float*) w + 4; vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567); vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale4567); vfpacc2x4567 = vmulq_f32(vfpacc2x4567, vscale4567); vfpacc3x4567 = vmulq_f32(vfpacc3x4567, vscale4567); vfpacc4x4567 = vmulq_f32(vfpacc4x4567, vscale4567); vfpacc5x4567 = vmulq_f32(vfpacc5x4567, vscale4567); vfpacc6x4567 = 
vmulq_f32(vfpacc6x4567, vscale4567); vfpacc7x4567 = vmulq_f32(vfpacc7x4567, vscale4567); vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123); vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567); vacc1x0123 = vcvtnq_s32_f32(vfpacc1x0123); vacc1x4567 = vcvtnq_s32_f32(vfpacc1x4567); vacc2x0123 = vcvtnq_s32_f32(vfpacc2x0123); vacc2x4567 = vcvtnq_s32_f32(vfpacc2x4567); vacc3x0123 = vcvtnq_s32_f32(vfpacc3x0123); vacc3x4567 = vcvtnq_s32_f32(vfpacc3x4567); vacc4x0123 = vcvtnq_s32_f32(vfpacc4x0123); vacc4x4567 = vcvtnq_s32_f32(vfpacc4x4567); vacc5x0123 = vcvtnq_s32_f32(vfpacc5x0123); vacc5x4567 = vcvtnq_s32_f32(vfpacc5x4567); vacc6x0123 = vcvtnq_s32_f32(vfpacc6x0123); vacc6x4567 = vcvtnq_s32_f32(vfpacc6x4567); vacc7x0123 = vcvtnq_s32_f32(vfpacc7x0123); vacc7x4567 = vcvtnq_s32_f32(vfpacc7x4567); const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point); #if XNN_ARCH_ARM64 const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point); const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point); const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point); const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point); const int16x8_t vacc4x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc4x0123), vacc4x4567), voutput_zero_point); const int16x8_t vacc5x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc5x0123), vacc5x4567), voutput_zero_point); const int16x8_t vacc6x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc6x0123), vacc6x4567), voutput_zero_point); const int16x8_t vacc7x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc7x0123), vacc7x4567), voutput_zero_point); int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567); int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567); int8x16_t vout4x01234567_5x01234567 = vqmovn_high_s16(vqmovn_s16(vacc4x01234567), vacc5x01234567); int8x16_t vout6x01234567_7x01234567 = vqmovn_high_s16(vqmovn_s16(vacc6x01234567), vacc7x01234567); #else const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point); const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point); const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point); const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point); const int16x8_t vacc4x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc4x0123), vqmovn_s32(vacc4x4567)), voutput_zero_point); const int16x8_t vacc5x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc5x0123), vqmovn_s32(vacc5x4567)), voutput_zero_point); const int16x8_t vacc6x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc6x0123), vqmovn_s32(vacc6x4567)), voutput_zero_point); const int16x8_t vacc7x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc7x0123), vqmovn_s32(vacc7x4567)), voutput_zero_point); int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567)); int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567)); int8x16_t vout4x01234567_5x01234567 = vcombine_s8(vqmovn_s16(vacc4x01234567), 
vqmovn_s16(vacc5x01234567)); int8x16_t vout6x01234567_7x01234567 = vcombine_s8(vqmovn_s16(vacc6x01234567), vqmovn_s16(vacc7x01234567)); #endif const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neonv8.output_min); const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neonv8.output_max); vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min); vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min); vout4x01234567_5x01234567 = vmaxq_s8(vout4x01234567_5x01234567, voutput_min); vout6x01234567_7x01234567 = vmaxq_s8(vout6x01234567_7x01234567, voutput_min); vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max); vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max); vout4x01234567_5x01234567 = vminq_s8(vout4x01234567_5x01234567, voutput_max); vout6x01234567_7x01234567 = vminq_s8(vout6x01234567_7x01234567, voutput_max); if (nc >= 8) { // Main case where the 8 columns fit in the destination. vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567)); vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567)); vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567)); vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567)); vst1_s8(c4 + 0, vget_low_s8(vout4x01234567_5x01234567)); vst1_s8(c5 + 0, vget_high_s8(vout4x01234567_5x01234567)); vst1_s8(c6 + 0, vget_low_s8(vout6x01234567_7x01234567)); vst1_s8(c7 + 0, vget_high_s8(vout6x01234567_7x01234567)); // Advance to the next 8 columns. c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); c4 = (int8_t*) ((uintptr_t) c4 + cn_stride); c5 = (int8_t*) ((uintptr_t) c5 + cn_stride); c6 = (int8_t*) ((uintptr_t) c6 + cn_stride); c7 = (int8_t*) ((uintptr_t) c7 + cn_stride); a0 = (const int8_t*) ((uintptr_t) a0 - kc); a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); a3 = (const int8_t*) ((uintptr_t) a3 - kc); a4 = (const int8_t*) ((uintptr_t) a4 - kc); a5 = (const int8_t*) ((uintptr_t) a5 - kc); a6 = (const int8_t*) ((uintptr_t) a6 - kc); a7 = (const int8_t*) ((uintptr_t) a7 - kc); nc -= 8; } else { // Final case where not all of the 8 columns fit in the destination.
if (nc & 4) { vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4; vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4; vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4; vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4; vst1q_lane_u32((void*) c4, vreinterpretq_u32_s8(vout4x01234567_5x01234567), 0); c4 += 4; vst1q_lane_u32((void*) c5, vreinterpretq_u32_s8(vout4x01234567_5x01234567), 2); c5 += 4; vst1q_lane_u32((void*) c6, vreinterpretq_u32_s8(vout6x01234567_7x01234567), 0); c6 += 4; vst1q_lane_u32((void*) c7, vreinterpretq_u32_s8(vout6x01234567_7x01234567), 2); c7 += 4; vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4); vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4); vout4x01234567_5x01234567 = vextq_s8(vout4x01234567_5x01234567, vout4x01234567_5x01234567, 4); vout6x01234567_7x01234567 = vextq_s8(vout6x01234567_7x01234567, vout6x01234567_7x01234567, 4); } if (nc & 2) { vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2; vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2; vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2; vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2; vst1q_lane_u16((void*) c4, vreinterpretq_u16_s8(vout4x01234567_5x01234567), 0); c4 += 2; vst1q_lane_u16((void*) c5, vreinterpretq_u16_s8(vout4x01234567_5x01234567), 4); c5 += 2; vst1q_lane_u16((void*) c6, vreinterpretq_u16_s8(vout6x01234567_7x01234567), 0); c6 += 2; vst1q_lane_u16((void*) c7, vreinterpretq_u16_s8(vout6x01234567_7x01234567), 4); c7 += 2; vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2); vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2); vout4x01234567_5x01234567 = vextq_s8(vout4x01234567_5x01234567, vout4x01234567_5x01234567, 2); vout6x01234567_7x01234567 = vextq_s8(vout6x01234567_7x01234567, vout6x01234567_7x01234567, 2); } if (nc & 1) { vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0); vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8); vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0); vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8); vst1q_lane_s8(c4, vout4x01234567_5x01234567, 0); vst1q_lane_s8(c5, vout4x01234567_5x01234567, 8); vst1q_lane_s8(c6, vout6x01234567_7x01234567, 0); vst1q_lane_s8(c7, vout6x01234567_7x01234567, 8); } nc = 0; } } while (nc != 0); }
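The heart of the kernel above is vdotq_lane_s32, which lets the 8x8c4 tile advance 4 positions of K for 8 rows and 8 columns with only 16 instructions per lane group. Each call accumulates four independent 4-element int8 dot products: the 16-byte operand holds four groups of four weights (one group per output column), and the lane index selects which four consecutive activations participate. A scalar model of the intrinsic as used here, with illustrative names:

#include <stdint.h>

/* Reference for vdotq_lane_s32(acc, b, a, lane) as used above:
   lane 0 reads a[0..3], lane 1 reads a[4..7];
   b[4*n .. 4*n+3] feeds output column n. */
static void vdotq_lane_s32_ref(int32_t acc[4], const int8_t b[16],
                               const int8_t a[8], int lane) {
  const int8_t* a4 = a + 4 * lane;
  for (int n = 0; n < 4; n++) {
    for (int k = 0; k < 4; k++) {
      acc[n] += (int32_t) b[4 * n + k] * (int32_t) a4[k];
    }
  }
}

This grouping in fours is also why kc is rounded up to a multiple of 4 at the top of the kernel: the packed weights always come in complete groups of 4 along K, and the 4-byte remainder loop reuses the same instruction with lane 0 alone.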
19,987
53.021622
132
c
XNNPACK
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x16-minmax-fp32-neon-mlal-lane-prfm.c
// Auto-generated file. Do not edit! // Template: src/qs8-igemm/neon-mlal-lane.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/common.h> #include <xnnpack/igemm.h> #include <xnnpack/prefetch.h> void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x16__neon_mlal_lane_prfm( size_t mr, size_t nc, size_t kc, size_t ks, const int8_t** restrict a, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const int8_t* zero, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a_offset % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); int8_t* c0 = c; do { int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4); int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4); int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4); int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4); size_t p = ks; do { const int8_t* restrict a0 = a[0]; if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const int8_t*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; while (k >= 8 * sizeof(int8_t)) { const int8x8_t va0 = vld1_s8(a0); a0 += 8; const int16x8_t vxa0 = vmovl_s8(va0); const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0); const int8x8_t vb89ABCDEFc0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc0 = vmovl_s8(vb89ABCDEFc0); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0); const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1); const int8x8_t vb89ABCDEFc1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc1 = vmovl_s8(vb89ABCDEFc1); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1); const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2); const int8x8_t vb89ABCDEFc2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc2 = vmovl_s8(vb89ABCDEFc2); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc2), 
vget_low_s16(vxa0), 2); const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3); const int8x8_t vb89ABCDEFc3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc3 = vmovl_s8(vb89ABCDEFc3); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3); xnn_prefetch_to_l1((const int8_t*) w + 448); xnn_prefetch_to_l1((const int8_t*) w + 512); const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0); const int8x8_t vb89ABCDEFc4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc4 = vmovl_s8(vb89ABCDEFc4); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0); const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1); const int8x8_t vb89ABCDEFc5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc5 = vmovl_s8(vb89ABCDEFc5); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1); const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2); const int8x8_t vb89ABCDEFc6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc6 = vmovl_s8(vb89ABCDEFc6); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2); const int8x8_t vb01234567c7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c7 = vmovl_s8(vb01234567c7); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa0), 3); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa0), 3); const int8x8_t vb89ABCDEFc7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc7 = vmovl_s8(vb89ABCDEFc7); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc7), vget_high_s16(vxa0), 3); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc7), vget_high_s16(vxa0), 3); k -= 8 * sizeof(int8_t); } if XNN_UNLIKELY(k != 0) { const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k); const 
int16x8_t vxa0 = vmovl_s8(va0); const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0); const int8x8_t vb89ABCDEFc0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc0 = vmovl_s8(vb89ABCDEFc0); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0); if (k >= 2 * sizeof(int8_t)) { const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1); const int8x8_t vb89ABCDEFc1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc1 = vmovl_s8(vb89ABCDEFc1); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1); if (k > 2 * sizeof(int8_t)) { const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2); const int8x8_t vb89ABCDEFc2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc2 = vmovl_s8(vb89ABCDEFc2); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2); if (k >= 4 * sizeof(int8_t)) { const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3); const int8x8_t vb89ABCDEFc3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc3 = vmovl_s8(vb89ABCDEFc3); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3); if (k > 4 * sizeof(int8_t)) { const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4); const int8x8_t vb89ABCDEFc4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc4 = vmovl_s8(vb89ABCDEFc4); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0); if (k >= 6 * sizeof(int8_t)) { const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); 
const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5); const int8x8_t vb89ABCDEFc5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc5 = vmovl_s8(vb89ABCDEFc5); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1); if (k > 6 * sizeof(int8_t)) { const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6); const int8x8_t vb89ABCDEFc6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc6 = vmovl_s8(vb89ABCDEFc6); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2); } } } } } } } p -= 1 * sizeof(void*); } while (p != 0); // Post-accumulation work float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123); float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567); float32x4_t vfpacc0x89AB = vcvtq_f32_s32(vacc0x89AB); float32x4_t vfpacc0xCDEF = vcvtq_f32_s32(vacc0xCDEF); const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4); vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123); const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4); vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567); const float32x4_t vscale89AB = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4); vfpacc0x89AB = vmulq_f32(vfpacc0x89AB, vscale89AB); const float32x4_t vscaleCDEF = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4); vfpacc0xCDEF = vmulq_f32(vfpacc0xCDEF, vscaleCDEF); const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias); vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias)); vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias)); vacc0x89AB = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x89AB, vmagic_bias)); vacc0xCDEF = vreinterpretq_s32_f32(vaddq_f32(vfpacc0xCDEF, vmagic_bias)); const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point); vacc0x0123 = vqsubq_s32(vacc0x0123, vmagic_bias_less_output_zero_point); vacc0x4567 = vqsubq_s32(vacc0x4567, vmagic_bias_less_output_zero_point); vacc0x89AB = vqsubq_s32(vacc0x89AB, vmagic_bias_less_output_zero_point); vacc0xCDEF = vqsubq_s32(vacc0xCDEF, vmagic_bias_less_output_zero_point); #if XNN_ARCH_ARM64 int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567); int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF); int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF); #else int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)); int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)); int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF)); #endif const int8x16_t 
voutput_min = vld1q_dup_s8(&params->fp32_neon.output_min); vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min); const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neon.output_max); vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max); if (nc >= 16) { vst1q_s8(c0 + 0, vout0x0123456789ABCDEF); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); a = (const int8_t**restrict) ((uintptr_t) a - ks); nc -= 16; } else { int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF); if (nc & 8) { vst1_s8(c0, vout0x01234567); c0 += 8; vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF); } if (nc & 4) { vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4; vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4); } if (nc & 2) { vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2; vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2); } if (nc & 1) { vst1_lane_s8(c0, vout0x01234567, 0); } nc = 0; } } while (nc != 0); }
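Unlike the neonv8 kernels earlier in this dump, this fp32_neon variant targets ARMv7-class NEON, which lacks vcvtnq_s32_f32, so it rounds with the magic-bias trick instead: adding 2^23 + 2^22 pins the float exponent so the rounded integer lands in the low mantissa bits, after which the bit pattern is reinterpreted as int32 and a pre-combined constant (the bias bits less the output zero point) is subtracted. A scalar sketch of the idea; the constants shown are the conventional ones for this trick and are assumed here rather than read from XNNPACK's params:

#include <stdint.h>
#include <string.h>

static int32_t magic_bias_round_ref(float fpacc, int32_t output_zero_point) {
  const float vmagic_bias = 12582912.0f;            /* 0x1.8p23 = 2^23 + 2^22 */
  const int32_t vmagic_bias_bits = INT32_C(0x4B400000);  /* bit pattern of 0x1.8p23 */
  float biased = fpacc + vmagic_bias;               /* vaddq_f32 with magic_bias */
  int32_t bits;
  memcpy(&bits, &biased, sizeof(bits));             /* vreinterpretq_s32_f32 */
  /* vqsubq_s32 with magic_bias_less_output_zero_point (saturation omitted) */
  return bits - (vmagic_bias_bits - output_zero_point);
}

The vqsubq_s32 in the kernel saturates, which the sketch omits; the trick is exact only while the scaled accumulator stays within roughly +/-2^22, the integer range the biased mantissa can hold.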
17,241
53.220126
125
c
XNNPACK
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x16-minmax-fp32-neon-mlal-lane.c
// Auto-generated file. Do not edit! // Template: src/qs8-igemm/neon-mlal-lane.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/common.h> #include <xnnpack/igemm.h> void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x16__neon_mlal_lane( size_t mr, size_t nc, size_t kc, size_t ks, const int8_t** restrict a, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const int8_t* zero, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a_offset % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); int8_t* c0 = c; do { int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4); int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4); int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4); int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4); size_t p = ks; do { const int8_t* restrict a0 = a[0]; if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const int8_t*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; while (k >= 8 * sizeof(int8_t)) { const int8x8_t va0 = vld1_s8(a0); a0 += 8; const int16x8_t vxa0 = vmovl_s8(va0); const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0); const int8x8_t vb89ABCDEFc0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc0 = vmovl_s8(vb89ABCDEFc0); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0); const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1); const int8x8_t vb89ABCDEFc1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc1 = vmovl_s8(vb89ABCDEFc1); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1); const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2); const int8x8_t vb89ABCDEFc2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc2 = vmovl_s8(vb89ABCDEFc2); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2); const int8x8_t 
vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3); const int8x8_t vb89ABCDEFc3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc3 = vmovl_s8(vb89ABCDEFc3); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3); const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0); const int8x8_t vb89ABCDEFc4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc4 = vmovl_s8(vb89ABCDEFc4); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0); const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1); const int8x8_t vb89ABCDEFc5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc5 = vmovl_s8(vb89ABCDEFc5); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1); const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2); const int8x8_t vb89ABCDEFc6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc6 = vmovl_s8(vb89ABCDEFc6); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2); const int8x8_t vb01234567c7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c7 = vmovl_s8(vb01234567c7); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa0), 3); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa0), 3); const int8x8_t vb89ABCDEFc7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc7 = vmovl_s8(vb89ABCDEFc7); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc7), vget_high_s16(vxa0), 3); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc7), vget_high_s16(vxa0), 3); k -= 8 * sizeof(int8_t); } if XNN_UNLIKELY(k != 0) { const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k); const int16x8_t vxa0 = vmovl_s8(va0); const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t 
vxb01234567c0 = vmovl_s8(vb01234567c0); const int8x8_t vb89ABCDEFc0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc0 = vmovl_s8(vb89ABCDEFc0); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0); if (k >= 2 * sizeof(int8_t)) { const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1); const int8x8_t vb89ABCDEFc1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc1 = vmovl_s8(vb89ABCDEFc1); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1); if (k > 2 * sizeof(int8_t)) { const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2); const int8x8_t vb89ABCDEFc2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc2 = vmovl_s8(vb89ABCDEFc2); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2); if (k >= 4 * sizeof(int8_t)) { const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3); const int8x8_t vb89ABCDEFc3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc3 = vmovl_s8(vb89ABCDEFc3); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3); if (k > 4 * sizeof(int8_t)) { const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4); const int8x8_t vb89ABCDEFc4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc4 = vmovl_s8(vb89ABCDEFc4); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0); if (k >= 6 * sizeof(int8_t)) { const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5); const int8x8_t vb89ABCDEFc5 = vld1_s8(w); w = (const void*) ((const int8_t*) 
w + 8); const int16x8_t vxb89ABCDEFc5 = vmovl_s8(vb89ABCDEFc5); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1); if (k > 6 * sizeof(int8_t)) { const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6); const int8x8_t vb89ABCDEFc6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc6 = vmovl_s8(vb89ABCDEFc6); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2); } } } } } } } p -= 1 * sizeof(void*); } while (p != 0); // Post-accumulation work float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123); float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567); float32x4_t vfpacc0x89AB = vcvtq_f32_s32(vacc0x89AB); float32x4_t vfpacc0xCDEF = vcvtq_f32_s32(vacc0xCDEF); const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4); vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123); const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4); vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567); const float32x4_t vscale89AB = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4); vfpacc0x89AB = vmulq_f32(vfpacc0x89AB, vscale89AB); const float32x4_t vscaleCDEF = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4); vfpacc0xCDEF = vmulq_f32(vfpacc0xCDEF, vscaleCDEF); const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias); vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias)); vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias)); vacc0x89AB = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x89AB, vmagic_bias)); vacc0xCDEF = vreinterpretq_s32_f32(vaddq_f32(vfpacc0xCDEF, vmagic_bias)); const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point); vacc0x0123 = vqsubq_s32(vacc0x0123, vmagic_bias_less_output_zero_point); vacc0x4567 = vqsubq_s32(vacc0x4567, vmagic_bias_less_output_zero_point); vacc0x89AB = vqsubq_s32(vacc0x89AB, vmagic_bias_less_output_zero_point); vacc0xCDEF = vqsubq_s32(vacc0xCDEF, vmagic_bias_less_output_zero_point); #if XNN_ARCH_ARM64 int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567); int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF); int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF); #else int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)); int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)); int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF)); #endif const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neon.output_min); vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min); 
const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neon.output_max); vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max); if (nc >= 16) { vst1q_s8(c0 + 0, vout0x0123456789ABCDEF); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); a = (const int8_t**restrict) ((uintptr_t) a - ks); nc -= 16; } else { int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF); if (nc & 8) { vst1_s8(c0, vout0x01234567); c0 += 8; vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF); } if (nc & 4) { vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4; vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4); } if (nc & 2) { vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2; vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2); } if (nc & 1) { vst1_lane_s8(c0, vout0x01234567, 0); } nc = 0; } } while (nc != 0); }
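Relative to the GEMM kernels, these IGEMM variants take their activations through an indirection buffer: a[] holds one pointer per kernel tap, taps that fall into padding point at the shared zero row, and a_offset is added only to pointers that differ from zero (the XNN_UNPREDICTABLE branch above). A minimal sketch of filling such a buffer for one output pixel of this 1-row kernel, with hypothetical inputs; XNNPACK builds the real buffer internally:

#include <stddef.h>
#include <stdint.h>

/* rows[t] is the input row that kernel tap t reads for this output pixel,
   or NULL where the tap falls into padding; `zero` is a shared zeroed row.
   Both names are hypothetical -- this only illustrates the convention the
   kernel's `a`/`zero` arguments follow. */
static void fill_indirection(const int8_t** a, size_t taps,
                             const int8_t* const* rows, const int8_t* zero) {
  for (size_t t = 0; t < taps; t++) {
    a[t] = (rows[t] != NULL) ? rows[t] : zero;
  }
}

Because the kernel compares each pointer against zero before applying a_offset, one zeroed row can be shared across the whole batch without relocation.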
17,100
53.288889
125
c
XNNPACK
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x16-minmax-fp32-neonv8-mlal-lane-prfm.c
// Auto-generated file. Do not edit! // Template: src/qs8-igemm/neon-mlal-lane.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/common.h> #include <xnnpack/igemm.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/prefetch.h> void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x16__neonv8_mlal_lane_prfm( size_t mr, size_t nc, size_t kc, size_t ks, const int8_t** restrict a, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const int8_t* zero, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a_offset % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); int8_t* c0 = c; do { int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4); int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4); int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4); int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4); size_t p = ks; do { const int8_t* restrict a0 = a[0]; if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const int8_t*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; while (k >= 8 * sizeof(int8_t)) { const int8x8_t va0 = vld1_s8(a0); a0 += 8; const int16x8_t vxa0 = vmovl_s8(va0); const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0); const int8x8_t vb89ABCDEFc0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc0 = vmovl_s8(vb89ABCDEFc0); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0); const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1); const int8x8_t vb89ABCDEFc1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc1 = vmovl_s8(vb89ABCDEFc1); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1); const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2); const int8x8_t vb89ABCDEFc2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc2 = vmovl_s8(vb89ABCDEFc2); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2); vacc0xCDEF = 
vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2); const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3); const int8x8_t vb89ABCDEFc3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc3 = vmovl_s8(vb89ABCDEFc3); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3); xnn_prefetch_to_l1((const int8_t*) w + 448); xnn_prefetch_to_l1((const int8_t*) w + 512); const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0); const int8x8_t vb89ABCDEFc4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc4 = vmovl_s8(vb89ABCDEFc4); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0); const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1); const int8x8_t vb89ABCDEFc5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc5 = vmovl_s8(vb89ABCDEFc5); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1); const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2); const int8x8_t vb89ABCDEFc6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc6 = vmovl_s8(vb89ABCDEFc6); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2); const int8x8_t vb01234567c7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c7 = vmovl_s8(vb01234567c7); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa0), 3); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa0), 3); const int8x8_t vb89ABCDEFc7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc7 = vmovl_s8(vb89ABCDEFc7); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc7), vget_high_s16(vxa0), 3); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc7), vget_high_s16(vxa0), 3); k -= 8 * sizeof(int8_t); } if XNN_UNLIKELY(k != 0) { const int8x8_t va0 = vld1_s8(a0); 
a0 = (const int8_t*) ((uintptr_t) a0 + k); const int16x8_t vxa0 = vmovl_s8(va0); const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0); const int8x8_t vb89ABCDEFc0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc0 = vmovl_s8(vb89ABCDEFc0); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0); if (k >= 2 * sizeof(int8_t)) { const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1); const int8x8_t vb89ABCDEFc1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc1 = vmovl_s8(vb89ABCDEFc1); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1); if (k > 2 * sizeof(int8_t)) { const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2); const int8x8_t vb89ABCDEFc2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc2 = vmovl_s8(vb89ABCDEFc2); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2); if (k >= 4 * sizeof(int8_t)) { const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3); const int8x8_t vb89ABCDEFc3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc3 = vmovl_s8(vb89ABCDEFc3); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3); if (k > 4 * sizeof(int8_t)) { const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4); const int8x8_t vb89ABCDEFc4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc4 = vmovl_s8(vb89ABCDEFc4); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0); if (k >= 6 * sizeof(int8_t)) { const int8x8_t vb01234567c5 = 
vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5); const int8x8_t vb89ABCDEFc5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc5 = vmovl_s8(vb89ABCDEFc5); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1); if (k > 6 * sizeof(int8_t)) { const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6); const int8x8_t vb89ABCDEFc6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc6 = vmovl_s8(vb89ABCDEFc6); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2); } } } } } } } p -= 1 * sizeof(void*); } while (p != 0); // Post-accumulation work float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123); float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567); float32x4_t vfpacc0x89AB = vcvtq_f32_s32(vacc0x89AB); float32x4_t vfpacc0xCDEF = vcvtq_f32_s32(vacc0xCDEF); const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4); vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123); const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4); vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567); const float32x4_t vscale89AB = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4); vfpacc0x89AB = vmulq_f32(vfpacc0x89AB, vscale89AB); const float32x4_t vscaleCDEF = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4); vfpacc0xCDEF = vmulq_f32(vfpacc0xCDEF, vscaleCDEF); vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123); vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567); vacc0x89AB = vcvtnq_s32_f32(vfpacc0x89AB); vacc0xCDEF = vcvtnq_s32_f32(vfpacc0xCDEF); const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point); #if XNN_ARCH_ARM64 int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567); int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF); vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point); vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point); int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF); #else int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)); int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)); vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point); vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point); int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF)); #endif const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neonv8.output_min); vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min); const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neonv8.output_max); 
vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max); if (nc >= 16) { vst1q_s8(c0 + 0, vout0x0123456789ABCDEF); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); a = (const int8_t**restrict) ((uintptr_t) a - ks); nc -= 16; } else { int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF); if (nc & 8) { vst1_s8(c0, vout0x01234567); c0 += 8; vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF); } if (nc & 4) { vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4; vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4); } if (nc & 2) { vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2; vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2); } if (nc & 1) { vst1_lane_s8(c0, vout0x01234567, 0); } nc = 0; } } while (nc != 0); }
file_length: 17,019
avg_line_length: 52.690852
max_line_length: 114
extension_type: c
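All of the mlal-lane microkernels in this dump compute the same core arithmetic: one row of int8 activations against a packed int8 weight block, accumulated in int32 on top of per-channel int32 biases, before the fp32 requantization stage. A minimal scalar reference of that accumulation, useful for checking kernel output (ref_igemm_row is an illustrative name, not an XNNPACK symbol):

#include <stddef.h>
#include <stdint.h>

/* Reference accumulation: acc[n] = bias[n] + sum_k a[k] * w[k][n]. */
static void ref_igemm_row(
    size_t nc, size_t kc,
    const int8_t* a,       /* kc activations for one output row */
    const int32_t* bias,   /* nc int32 biases */
    const int8_t* w,       /* weights, stored n-fastest per k step */
    int32_t* acc)          /* nc int32 accumulators (output) */
{
  for (size_t n = 0; n < nc; n++) {
    acc[n] = bias[n];
  }
  for (size_t k = 0; k < kc; k++) {
    for (size_t n = 0; n < nc; n++) {
      acc[n] += (int32_t) a[k] * (int32_t) w[k * nc + n];
    }
  }
}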
XNNPACK
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x16-minmax-fp32-neonv8-mlal-lane.c
// Auto-generated file. Do not edit! // Template: src/qs8-igemm/neon-mlal-lane.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/common.h> #include <xnnpack/igemm.h> #include <xnnpack/intrinsics-polyfill.h> void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x16__neonv8_mlal_lane( size_t mr, size_t nc, size_t kc, size_t ks, const int8_t** restrict a, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const int8_t* zero, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a_offset % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); int8_t* c0 = c; do { int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4); int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4); int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4); int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4); size_t p = ks; do { const int8_t* restrict a0 = a[0]; if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const int8_t*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; while (k >= 8 * sizeof(int8_t)) { const int8x8_t va0 = vld1_s8(a0); a0 += 8; const int16x8_t vxa0 = vmovl_s8(va0); const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0); const int8x8_t vb89ABCDEFc0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc0 = vmovl_s8(vb89ABCDEFc0); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0); const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1); const int8x8_t vb89ABCDEFc1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc1 = vmovl_s8(vb89ABCDEFc1); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1); const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2); const int8x8_t vb89ABCDEFc2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc2 = vmovl_s8(vb89ABCDEFc2); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, 
vget_high_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2); const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3); const int8x8_t vb89ABCDEFc3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc3 = vmovl_s8(vb89ABCDEFc3); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3); const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0); const int8x8_t vb89ABCDEFc4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc4 = vmovl_s8(vb89ABCDEFc4); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0); const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1); const int8x8_t vb89ABCDEFc5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc5 = vmovl_s8(vb89ABCDEFc5); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1); const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2); const int8x8_t vb89ABCDEFc6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc6 = vmovl_s8(vb89ABCDEFc6); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2); const int8x8_t vb01234567c7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c7 = vmovl_s8(vb01234567c7); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa0), 3); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa0), 3); const int8x8_t vb89ABCDEFc7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc7 = vmovl_s8(vb89ABCDEFc7); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc7), vget_high_s16(vxa0), 3); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc7), vget_high_s16(vxa0), 3); k -= 8 * sizeof(int8_t); } if XNN_UNLIKELY(k != 0) { const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k); const int16x8_t vxa0 = vmovl_s8(va0); const int8x8_t vb01234567c0 = 
vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0); const int8x8_t vb89ABCDEFc0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc0 = vmovl_s8(vb89ABCDEFc0); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0); if (k >= 2 * sizeof(int8_t)) { const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1); const int8x8_t vb89ABCDEFc1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc1 = vmovl_s8(vb89ABCDEFc1); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1); if (k > 2 * sizeof(int8_t)) { const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2); const int8x8_t vb89ABCDEFc2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc2 = vmovl_s8(vb89ABCDEFc2); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2); if (k >= 4 * sizeof(int8_t)) { const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3); const int8x8_t vb89ABCDEFc3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc3 = vmovl_s8(vb89ABCDEFc3); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3); if (k > 4 * sizeof(int8_t)) { const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4); const int8x8_t vb89ABCDEFc4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc4 = vmovl_s8(vb89ABCDEFc4); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0); if (k >= 6 * sizeof(int8_t)) { const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5); const 
int8x8_t vb89ABCDEFc5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc5 = vmovl_s8(vb89ABCDEFc5); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1); if (k > 6 * sizeof(int8_t)) { const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6); const int8x8_t vb89ABCDEFc6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8); const int16x8_t vxb89ABCDEFc6 = vmovl_s8(vb89ABCDEFc6); vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2); vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2); vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2); vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2); } } } } } } } p -= 1 * sizeof(void*); } while (p != 0); // Post-accumulation work float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123); float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567); float32x4_t vfpacc0x89AB = vcvtq_f32_s32(vacc0x89AB); float32x4_t vfpacc0xCDEF = vcvtq_f32_s32(vacc0xCDEF); const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4); vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123); const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4); vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567); const float32x4_t vscale89AB = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4); vfpacc0x89AB = vmulq_f32(vfpacc0x89AB, vscale89AB); const float32x4_t vscaleCDEF = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4); vfpacc0xCDEF = vmulq_f32(vfpacc0xCDEF, vscaleCDEF); vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123); vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567); vacc0x89AB = vcvtnq_s32_f32(vfpacc0x89AB); vacc0xCDEF = vcvtnq_s32_f32(vfpacc0xCDEF); const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point); #if XNN_ARCH_ARM64 int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567); int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF); vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point); vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point); int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF); #else int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)); int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)); vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point); vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point); int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF)); #endif const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neonv8.output_min); vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min); const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neonv8.output_max); vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max); if (nc >= 16) { vst1q_s8(c0 + 0, 
vout0x0123456789ABCDEF); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); a = (const int8_t**restrict) ((uintptr_t) a - ks); nc -= 16; } else { int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF); if (nc & 8) { vst1_s8(c0, vout0x01234567); c0 += 8; vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF); } if (nc & 4) { vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4; vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4); } if (nc & 2) { vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2; vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2); } if (nc & 1) { vst1_lane_s8(c0, vout0x01234567, 0); } nc = 0; } } while (nc != 0); }
file_length: 16,878
avg_line_length: 52.754777
max_line_length: 114
extension_type: c
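The kernel above consumes activations eight at a time with vmlal_lane_s16: each lane of the widened activation vector is broadcast against one widened row of eight weights and accumulated into int32 lanes. A hedged scalar emulation of a single vmlal_lane_s16 step on one int32x4 accumulator half (emu_vmlal_lane_s16 is illustrative):

#include <stdint.h>

/* acc[i] += (int32_t) b[i] * (int32_t) a[lane], for i = 0..3. */
static void emu_vmlal_lane_s16(
    int32_t acc[4], const int16_t b[4], const int16_t a[4], int lane)
{
  for (int i = 0; i < 4; i++) {
    acc[i] += (int32_t) b[i] * (int32_t) a[lane];
  }
}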
XNNPACK
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x16c4-minmax-fp32-neondot.c
// Auto-generated file. Do not edit! // Template: src/qs8-igemm/c4-neondot.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/igemm.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/math.h> void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x16c4__neondot( size_t mr, size_t nc, size_t kc, size_t ks, const int8_t** restrict a, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const int8_t* zero, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a_offset % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 4 * sizeof(int8_t)); int8_t* c0 = c; do { int32x4_t vacc0x0123 = vld1q_s32(w); w = (const int32_t*) w + 4; int32x4_t vacc0x4567 = vld1q_s32(w); w = (const int32_t*) w + 4; int32x4_t vacc0x89AB = vld1q_s32(w); w = (const int32_t*) w + 4; int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const int32_t*) w + 4; size_t p = ks; do { const int8_t* restrict a0 = a[0]; if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const int8_t*) ((uintptr_t) a0 + a_offset); } a += 1; // Inner accumulation loop along the 16 columns. size_t k = kc; // 2x partial unrolled loop to load 8 bytes at a time. while (k >= 8 * sizeof(int8_t)) { // Load a 1x8 block of activations. const int8x8_t va0x01234567 = vld1_s8(a0); a0 += 8; // Load a 8x16 block of weights. const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16; const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16; const int8x16_t vb0123x89AB = vld1q_s8(w); w = (const int8_t*) w + 16; const int8x16_t vb0123xCDEF = vld1q_s8(w); w = (const int8_t*) w + 16; const int8x16_t vb4567x0123 = vld1q_s8(w); w = (const int8_t*) w + 16; const int8x16_t vb4567x4567 = vld1q_s8(w); w = (const int8_t*) w + 16; const int8x16_t vb4567x89AB = vld1q_s8(w); w = (const int8_t*) w + 16; const int8x16_t vb4567xCDEF = vld1q_s8(w); w = (const int8_t*) w + 16; // Multiply-accumulate: 1x8 * 8x16 --> 1x16. vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0); vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0); vacc0x89AB = vdotq_lane_s32(vacc0x89AB, vb0123x89AB, va0x01234567, 0); vacc0xCDEF = vdotq_lane_s32(vacc0xCDEF, vb0123xCDEF, va0x01234567, 0); vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb4567x0123, va0x01234567, 1); vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb4567x4567, va0x01234567, 1); vacc0x89AB = vdotq_lane_s32(vacc0x89AB, vb4567x89AB, va0x01234567, 1); vacc0xCDEF = vdotq_lane_s32(vacc0xCDEF, vb4567xCDEF, va0x01234567, 1); k -= 8 * sizeof(int8_t); } // Handle up to 4 final positions of `k` if XNN_UNLIKELY(k != 0) { // Load a 1x4 block of activations. const int8x8_t va0x01234567 = vld1_s8(a0); // Load a 4x16 block of weights. const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16; const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16; const int8x16_t vb0123x89AB = vld1q_s8(w); w = (const int8_t*) w + 16; const int8x16_t vb0123xCDEF = vld1q_s8(w); w = (const int8_t*) w + 16; // Multiply-accumulate: 1x4 * 4x16 --> 1x16. 
vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0); vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0); vacc0x89AB = vdotq_lane_s32(vacc0x89AB, vb0123x89AB, va0x01234567, 0); vacc0xCDEF = vdotq_lane_s32(vacc0xCDEF, vb0123xCDEF, va0x01234567, 0); } p -= 1 * sizeof(void*); } while (p != 0); float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123); float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567); float32x4_t vfpacc0x89AB = vcvtq_f32_s32(vacc0x89AB); float32x4_t vfpacc0xCDEF = vcvtq_f32_s32(vacc0xCDEF); const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const float*) w + 4; vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123); const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const float*) w + 4; vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567); const float32x4_t vscale89AB = vld1q_f32((const float*) w); w = (const float*) w + 4; vfpacc0x89AB = vmulq_f32(vfpacc0x89AB, vscale89AB); const float32x4_t vscaleCDEF = vld1q_f32((const float*) w); w = (const float*) w + 4; vfpacc0xCDEF = vmulq_f32(vfpacc0xCDEF, vscaleCDEF); vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123); vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567); vacc0x89AB = vcvtnq_s32_f32(vfpacc0x89AB); vacc0xCDEF = vcvtnq_s32_f32(vfpacc0xCDEF); const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point); #if XNN_ARCH_ARM64 const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point); const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point); int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF); #else const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point); const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point); int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF)); #endif const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neonv8.output_min); const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neonv8.output_max); vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min); vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max); if (nc >= 16) { vst1q_s8(c0 + 0, vout0x0123456789ABCDEF); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); a = (const int8_t**restrict) ((uintptr_t) a - ks); nc -= 16; } else { int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF); if (nc & 8) { vst1_s8(c0, vout0x01234567); c0 += 8; vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF); } if (nc & 4) { vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4; vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4); } if (nc & 2) { vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2; vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2); } if (nc & 1) { vst1_lane_s8(c0, vout0x01234567, 0); } nc = 0; } } while (nc != 0); }
file_length: 7,300
avg_line_length: 40.016854
max_line_length: 130
extension_type: c
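The neondot variant above replaces eight mlal-lane steps with vdotq_lane_s32, which accumulates a 4-element int8 dot product into each int32 lane; the lane argument selects which 4-byte group of activations is used. A hedged scalar emulation of the intrinsic's semantics (emu_vdotq_lane_s32 is illustrative):

#include <stdint.h>

/* acc[i] += dot(b[4i..4i+3], a[4*lane..4*lane+3]), for i = 0..3. */
static void emu_vdotq_lane_s32(
    int32_t acc[4], const int8_t b[16], const int8_t a[8], int lane)
{
  const int8_t* a4 = a + 4 * lane;  /* lane selects a 4-byte activation group */
  for (int i = 0; i < 4; i++) {
    for (int j = 0; j < 4; j++) {
      acc[i] += (int32_t) b[4 * i + j] * (int32_t) a4[j];
    }
  }
}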
XNNPACK
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x1c4-minmax-fp32-armsimd32.c
// Auto-generated file. Do not edit! // Template: src/qs8-igemm/c4-armsimd32.c.in // Generator: tools/xngen // // Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_acle.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/math.h> #include <xnnpack/gemm.h> #include <xnnpack/unaligned.h> void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x1c4__armsimd32( size_t mr, size_t nc, size_t kc, size_t ks, const int8_t** restrict a, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const int8_t* zero, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 4 * sizeof(int8_t)); int8_t* c0 = c; const float vmagic_bias = params->fp32_armsimd32.magic_bias; do { int32_t vacc0x0 = ((const int32_t*) w)[0]; w = (const void*) ((const int32_t*) w + 1); size_t p = ks; do { const int8_t* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const int8_t*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; do { const int8x4_t va0 = (int8x4_t) unaligned_load_s32(a0); a0 += 4; const int16x2_t va0c02 = __sxtb16(va0); const int16x2_t va0c13 = __sxtb16(__ror(va0, 8)); const int8x4_t vb0 = *((const int8x4_t*) w); w = (const int8_t*) w + 4; const int16x2_t vb0c02 = __sxtb16(vb0); vacc0x0 = __smlad(va0c02, vb0c02, vacc0x0); const int16x2_t vb0c13 = __sxtb16(__ror(vb0, 8)); vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0); k -= 4 * sizeof(int8_t); } while (k != 0); p -= 1 * sizeof(void*); } while (p != 0); float vfpacc0x0 = (float) vacc0x0; const float vscale0 = ((const float*) w)[0]; vfpacc0x0 *= vscale0; w = (const void*) ((const float*) w + 1); vfpacc0x0 += vmagic_bias; int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0); const int32_t vmagic_bias_less_zero_point = params->fp32_armsimd32.magic_bias_less_zero_point; vout0x0 = __qsub(vout0x0, vmagic_bias_less_zero_point); vout0x0 = __ssat(vout0x0, 8); const uint32_t vout0 = (uint32_t) vout0x0; uint32_t vout = vout0; const int8x4_t voutput_min = (int8x4_t) params->fp32_armsimd32.output_min; __ssub8((int8x4_t) vout, voutput_min); vout = (uint32_t) __sel((uint8x4_t) vout, (uint8x4_t) voutput_min); const int8x4_t voutput_max = (int8x4_t) params->fp32_armsimd32.output_max; __ssub8((int8x4_t) vout, voutput_max); vout = (uint32_t) __sel((uint8x4_t) voutput_max, (uint8x4_t) vout); *c0 = (int8_t) vout; c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); a = (const int8_t**restrict) ((uintptr_t) a - ks); nc -= 1; } while (nc != 0); }
file_length: 3,176
avg_line_length: 26.387931
max_line_length: 98
extension_type: c
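The armsimd32 kernel above relies on the __sxtb16/__ror/__smlad idiom: __sxtb16 sign-extends bytes 0 and 2 of a word into two 16-bit halves, __sxtb16(__ror(x, 8)) does the same for bytes 1 and 3, and __smlad performs a dual 16x16 multiply-add into a 32-bit accumulator. A hedged scalar emulation of one such c4 step (emu_smlad_bytes is illustrative):

#include <stdint.h>

static int32_t emu_smlad_bytes(uint32_t va, uint32_t vb, int32_t acc)
{
  /* __sxtb16(x): bytes 0 and 2; __sxtb16(__ror(x, 8)): bytes 1 and 3. */
  const int16_t a02[2] = { (int8_t) va, (int8_t) (va >> 16) };
  const int16_t a13[2] = { (int8_t) (va >> 8), (int8_t) (va >> 24) };
  const int16_t b02[2] = { (int8_t) vb, (int8_t) (vb >> 16) };
  const int16_t b13[2] = { (int8_t) (vb >> 8), (int8_t) (vb >> 24) };
  /* __smlad(a, b, acc) = acc + a.lo*b.lo + a.hi*b.hi. */
  acc += (int32_t) a02[0] * b02[0] + (int32_t) a02[1] * b02[1];
  acc += (int32_t) a13[0] * b13[0] + (int32_t) a13[1] * b13[1];
  return acc;
}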
XNNPACK
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x2-minmax-fp32-scalar-fmagic.c
// Auto-generated file. Do not edit! // Template: src/qs8-igemm/scalar.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/math.h> #include <xnnpack/gemm.h> #include <xnnpack/unaligned.h> void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x2__scalar_fmagic( size_t mr, size_t nc, size_t kc, size_t ks, const int8_t** restrict a, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const int8_t* zero, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); int8_t* c0 = c; do { int32_t vacc0x0 = unaligned_indexed_load_s32(w, 0); int32_t vacc0x1 = unaligned_indexed_load_s32(w, 1); w = (const void*) ((const int32_t*) w + 2); size_t p = ks; do { const int8_t* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const int8_t*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; do { const int32_t va0 = (int32_t) *a0++; const int32_t vb0 = (int32_t) ((const int8_t*) w)[0]; const int32_t vb1 = (int32_t) ((const int8_t*) w)[1]; w = (const void*) ((const int8_t*) w + 2); vacc0x0 += va0 * vb0; vacc0x1 += va0 * vb1; k -= sizeof(int8_t); } while (k != 0); p -= 1 * sizeof(void*); } while (p != 0); float vfpacc0x0 = (float) vacc0x0; float vfpacc0x1 = (float) vacc0x1; const float vscale0 = unaligned_indexed_load_f32(w, 0); vfpacc0x0 *= vscale0; const float vscale1 = unaligned_indexed_load_f32(w, 1); vfpacc0x1 *= vscale1; w = (const void*) ((const float*) w + 2); const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point; vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point); vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point); const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point; vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point); vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point); const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias; vfpacc0x0 += vmagic_bias; vfpacc0x1 += vmagic_bias; const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point; int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0) - vmagic_bias_less_output_zero_point; int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1) - vmagic_bias_less_output_zero_point; if XNN_LIKELY(nc >= 2) { c0[0] = (int8_t) vout0x0; c0[1] = (int8_t) vout0x1; c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); a = (const int8_t**restrict) ((uintptr_t) a - ks); nc -= 2; } else { if (nc & 1) { c0[0] = (int8_t) vout0x0; } nc = 0; } } while (nc != 0); }
file_length: 3,349
avg_line_length: 28.130435
max_line_length: 116
extension_type: c
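The fmagic requantization used above clamps in the float domain, then rounds by adding a large "magic" bias so the integer result lands in the low mantissa bits, reinterprets the float as an integer, and subtracts the precomputed bias-less-zero-point. A hedged standalone sketch, assuming the usual magic constant 12582912.0f (bit pattern 0x4B400000); the real kernel takes these values from its params struct:

#include <stdint.h>
#include <string.h>

static int8_t fmagic_requantize(float x, float out_min_lzp, float out_max_lzp,
                                int32_t zero_point)
{
  const float magic_bias = 12582912.0f;                       /* 0x1.8p23, assumed */
  const int32_t magic_bias_less_zp = INT32_C(0x4B400000) - zero_point;
  x = x < out_min_lzp ? out_min_lzp : x;                      /* clamp in float */
  x = x > out_max_lzp ? out_max_lzp : x;
  x += magic_bias;                     /* round-to-nearest-even into the mantissa */
  uint32_t bits;
  memcpy(&bits, &x, sizeof bits);      /* float_as_uint32 */
  return (int8_t) ((int32_t) bits - magic_bias_less_zp);
}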
XNNPACK
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x2-minmax-fp32-scalar-imagic.c
// Auto-generated file. Do not edit! // Template: src/qs8-igemm/scalar.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/math.h> #include <xnnpack/gemm.h> #include <xnnpack/unaligned.h> void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x2__scalar_imagic( size_t mr, size_t nc, size_t kc, size_t ks, const int8_t** restrict a, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const int8_t* zero, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); int8_t* c0 = c; do { int32_t vacc0x0 = unaligned_indexed_load_s32(w, 0); int32_t vacc0x1 = unaligned_indexed_load_s32(w, 1); w = (const void*) ((const int32_t*) w + 2); size_t p = ks; do { const int8_t* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const int8_t*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; do { const int32_t va0 = (int32_t) *a0++; const int32_t vb0 = (int32_t) ((const int8_t*) w)[0]; const int32_t vb1 = (int32_t) ((const int8_t*) w)[1]; w = (const void*) ((const int8_t*) w + 2); vacc0x0 += va0 * vb0; vacc0x1 += va0 * vb1; k -= sizeof(int8_t); } while (k != 0); p -= 1 * sizeof(void*); } while (p != 0); float vfpacc0x0 = (float) vacc0x0; float vfpacc0x1 = (float) vacc0x1; const float vscale0 = unaligned_indexed_load_f32(w, 0); vfpacc0x0 *= vscale0; const float vscale1 = unaligned_indexed_load_f32(w, 1); vfpacc0x1 *= vscale1; w = (const void*) ((const float*) w + 2); const float vmagic_bias = params->fp32_scalar_imagic.magic_bias; vfpacc0x0 += vmagic_bias; vfpacc0x1 += vmagic_bias; int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0); int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1); const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min; vout0x0 = math_max_s32(vout0x0, vmagic_min); vout0x1 = math_max_s32(vout0x1, vmagic_min); const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max; vout0x0 = math_min_s32(vout0x0, vmagic_max); vout0x1 = math_min_s32(vout0x1, vmagic_max); const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point; vout0x0 -= vmagic_bias_less_zero_point; vout0x1 -= vmagic_bias_less_zero_point; if XNN_LIKELY(nc >= 2) { c0[0] = (int8_t) vout0x0; c0[1] = (int8_t) vout0x1; c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); a = (const int8_t**restrict) ((uintptr_t) a - ks); nc -= 2; } else { if (nc & 1) { c0[0] = (int8_t) vout0x0; } nc = 0; } } while (nc != 0); }
file_length: 3,202
avg_line_length: 26.144068
max_line_length: 102
extension_type: c
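The imagic variant above differs from fmagic in where it clamps: it adds the magic bias first and clamps the resulting integer bit pattern against precomputed magic_min/magic_max, assumed here to be the bit patterns of magic_bias + (qmin - zero_point) and magic_bias + (qmax - zero_point). A hedged sketch:

#include <stdint.h>
#include <string.h>

static int8_t imagic_requantize(float x, int32_t magic_min, int32_t magic_max,
                                int32_t magic_bias_less_zero_point)
{
  x += 12582912.0f;                    /* magic_bias, 0x1.8p23 (assumed) */
  int32_t v;
  memcpy(&v, &x, sizeof v);            /* float_as_uint32 */
  v = v < magic_min ? magic_min : v;   /* clamp in the integer domain */
  v = v > magic_max ? magic_max : v;
  return (int8_t) (v - magic_bias_less_zero_point);
}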
XNNPACK
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x2-minmax-fp32-scalar-lrintf.c
// Auto-generated file. Do not edit! // Template: src/qs8-igemm/scalar.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <math.h> #include <xnnpack/math.h> #include <xnnpack/gemm.h> #include <xnnpack/unaligned.h> void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x2__scalar_lrintf( size_t mr, size_t nc, size_t kc, size_t ks, const int8_t** restrict a, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const int8_t* zero, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); int8_t* c0 = c; do { int32_t vacc0x0 = unaligned_indexed_load_s32(w, 0); int32_t vacc0x1 = unaligned_indexed_load_s32(w, 1); w = (const void*) ((const int32_t*) w + 2); size_t p = ks; do { const int8_t* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const int8_t*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; do { const int32_t va0 = (int32_t) *a0++; const int32_t vb0 = (int32_t) ((const int8_t*) w)[0]; const int32_t vb1 = (int32_t) ((const int8_t*) w)[1]; w = (const void*) ((const int8_t*) w + 2); vacc0x0 += va0 * vb0; vacc0x1 += va0 * vb1; k -= sizeof(int8_t); } while (k != 0); p -= 1 * sizeof(void*); } while (p != 0); float vfpacc0x0 = (float) vacc0x0; float vfpacc0x1 = (float) vacc0x1; const float vscale0 = unaligned_indexed_load_f32(w, 0); vfpacc0x0 *= vscale0; const float vscale1 = unaligned_indexed_load_f32(w, 1); vfpacc0x1 *= vscale1; w = (const void*) ((const float*) w + 2); const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point; vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point); vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point); const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point; vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point); vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point); const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0); const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1); const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point; int32_t vout0x0 = vrndacc0x0 + voutput_zero_point; int32_t vout0x1 = vrndacc0x1 + voutput_zero_point; if XNN_LIKELY(nc >= 2) { c0[0] = (int8_t) vout0x0; c0[1] = (int8_t) vout0x1; c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); a = (const int8_t**restrict) ((uintptr_t) a - ks); nc -= 2; } else { if (nc & 1) { c0[0] = (int8_t) vout0x0; } nc = 0; } } while (nc != 0); }
file_length: 3,242
avg_line_length: 27.2
max_line_length: 100
extension_type: c
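The lrintf variant above does the simplest thing: clamp in float, round with lrintf (round-to-nearest-even under the default rounding mode), and add the output zero point as an integer. A hedged sketch, with fmaxf/fminf standing in for the kernel's math_max_f32/math_min_f32:

#include <math.h>
#include <stdint.h>

static int8_t lrintf_requantize(float x, float out_min_lzp, float out_max_lzp,
                                int32_t zero_point)
{
  x = fmaxf(x, out_min_lzp);
  x = fminf(x, out_max_lzp);
  return (int8_t) ((int32_t) lrintf(x) + zero_point);
}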
XNNPACK
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x2-minmax-fp32-wasm-fmagic.c
// Auto-generated file. Do not edit! // Template: src/qs8-igemm/scalar.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/math.h> #include <xnnpack/gemm.h> #include <xnnpack/unaligned.h> void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x2__wasm_fmagic( size_t mr, size_t nc, size_t kc, size_t ks, const int8_t** restrict a, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const int8_t* zero, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); int8_t* c0 = c; do { int32_t vacc0x0 = unaligned_indexed_load_s32(w, 0); int32_t vacc0x1 = unaligned_indexed_load_s32(w, 1); w = (const void*) ((const int32_t*) w + 2); size_t p = ks; do { const int8_t* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const int8_t*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; do { const int32_t va0 = (int32_t) *a0++; const int32_t vb0 = (int32_t) ((const int8_t*) w)[0]; const int32_t vb1 = (int32_t) ((const int8_t*) w)[1]; w = (const void*) ((const int8_t*) w + 2); vacc0x0 += va0 * vb0; vacc0x1 += va0 * vb1; k -= sizeof(int8_t); } while (k != 0); p -= 1 * sizeof(void*); } while (p != 0); float vfpacc0x0 = (float) vacc0x0; float vfpacc0x1 = (float) vacc0x1; const float vscale0 = unaligned_indexed_load_f32(w, 0); vfpacc0x0 *= vscale0; const float vscale1 = unaligned_indexed_load_f32(w, 1); vfpacc0x1 *= vscale1; w = (const void*) ((const float*) w + 2); const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point; vfpacc0x0 = __builtin_wasm_max_f32(vfpacc0x0, voutput_min_less_zero_point); vfpacc0x1 = __builtin_wasm_max_f32(vfpacc0x1, voutput_min_less_zero_point); const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point; vfpacc0x0 = __builtin_wasm_min_f32(vfpacc0x0, voutput_max_less_zero_point); vfpacc0x1 = __builtin_wasm_min_f32(vfpacc0x1, voutput_max_less_zero_point); const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias; vfpacc0x0 += vmagic_bias; vfpacc0x1 += vmagic_bias; const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point; int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0) - vmagic_bias_less_output_zero_point; int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1) - vmagic_bias_less_output_zero_point; if XNN_LIKELY(nc >= 2) { c0[0] = (int8_t) vout0x0; c0[1] = (int8_t) vout0x1; c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); a = (const int8_t**restrict) ((uintptr_t) a - ks); nc -= 2; } else { if (nc & 1) { c0[0] = (int8_t) vout0x0; } nc = 0; } } while (nc != 0); }
file_length: 3,387
avg_line_length: 28.46087
max_line_length: 116
extension_type: c
XNNPACK
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x2c4-minmax-fp32-armsimd32.c
// Auto-generated file. Do not edit! // Template: src/qs8-igemm/c4-armsimd32.c.in // Generator: tools/xngen // // Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_acle.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/math.h> #include <xnnpack/gemm.h> #include <xnnpack/unaligned.h> void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x2c4__armsimd32( size_t mr, size_t nc, size_t kc, size_t ks, const int8_t** restrict a, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const int8_t* zero, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 4 * sizeof(int8_t)); int8_t* c0 = c; const float vmagic_bias = params->fp32_armsimd32.magic_bias; do { int32_t vacc0x0 = ((const int32_t*) w)[0]; int32_t vacc0x1 = ((const int32_t*) w)[1]; w = (const void*) ((const int32_t*) w + 2); size_t p = ks; do { const int8_t* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const int8_t*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; do { const int8x4_t va0 = (int8x4_t) unaligned_load_s32(a0); a0 += 4; const int16x2_t va0c02 = __sxtb16(va0); const int16x2_t va0c13 = __sxtb16(__ror(va0, 8)); const int8x4_t vb0 = *((const int8x4_t*) w); w = (const int8_t*) w + 4; const int16x2_t vb0c02 = __sxtb16(vb0); vacc0x0 = __smlad(va0c02, vb0c02, vacc0x0); const int16x2_t vb0c13 = __sxtb16(__ror(vb0, 8)); vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0); const int8x4_t vb1 = *((const int8x4_t*) w); w = (const int8_t*) w + 4; const int16x2_t vb1c02 = __sxtb16(vb1); vacc0x1 = __smlad(va0c02, vb1c02, vacc0x1); const int16x2_t vb1c13 = __sxtb16(__ror(vb1, 8)); vacc0x1 = __smlad(va0c13, vb1c13, vacc0x1); k -= 4 * sizeof(int8_t); } while (k != 0); p -= 1 * sizeof(void*); } while (p != 0); float vfpacc0x0 = (float) vacc0x0; float vfpacc0x1 = (float) vacc0x1; const float vscale0 = ((const float*) w)[0]; vfpacc0x0 *= vscale0; const float vscale1 = ((const float*) w)[1]; vfpacc0x1 *= vscale1; w = (const void*) ((const float*) w + 2); vfpacc0x0 += vmagic_bias; vfpacc0x1 += vmagic_bias; int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0); int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1); const int32_t vmagic_bias_less_zero_point = params->fp32_armsimd32.magic_bias_less_zero_point; vout0x0 = __qsub(vout0x0, vmagic_bias_less_zero_point); vout0x1 = __qsub(vout0x1, vmagic_bias_less_zero_point); vout0x0 = __ssat(vout0x0, 8); vout0x1 = __ssat(vout0x1, 8); const uint32_t vout0 = (uint32_t) (uint8_t) vout0x0 | ((uint32_t) vout0x1 << 8); uint32_t vout = vout0; const int8x4_t voutput_min = (int8x4_t) params->fp32_armsimd32.output_min; __ssub8((int8x4_t) vout, voutput_min); vout = (uint32_t) __sel((uint8x4_t) vout, (uint8x4_t) voutput_min); const int8x4_t voutput_max = (int8x4_t) params->fp32_armsimd32.output_max; __ssub8((int8x4_t) vout, voutput_max); vout = (uint32_t) __sel((uint8x4_t) voutput_max, (uint8x4_t) vout); if XNN_LIKELY(nc >= 2) { unaligned_store_u16(c0, (uint16_t) vout); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); a = (const int8_t**restrict) ((uintptr_t) a - ks); nc -= 2; } else { *c0 = (int8_t) vout; nc = 0; } } while (nc != 0); }
file_length: 3,970
avg_line_length: 27.985401
max_line_length: 98
extension_type: c
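The 1x2c4 kernel above packs both quantized outputs into one word and clamps all bytes at once with __ssub8 + __sel: __ssub8 sets a per-byte GE flag for (vout - bound), and __sel then picks each byte from its first operand where the flag is set. A hedged scalar emulation of that double clamp (emu_clamp_bytes is illustrative):

#include <stdint.h>

static uint32_t emu_clamp_bytes(uint32_t vout, uint32_t vmin, uint32_t vmax)
{
  uint32_t result = 0;
  for (int i = 0; i < 4; i++) {
    int8_t v  = (int8_t) (vout >> (8 * i));
    int8_t lo = (int8_t) (vmin >> (8 * i));
    int8_t hi = (int8_t) (vmax >> (8 * i));
    v = v < lo ? lo : v;  /* __ssub8(vout, vmin); __sel(vout, vmin) */
    v = v > hi ? hi : v;  /* __ssub8(vout, vmax); __sel(vmax, vout) */
    result |= (uint32_t) (uint8_t) v << (8 * i);
  }
  return result;
}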
XNNPACK
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x4-minmax-fp32-scalar-fmagic.c
// Auto-generated file. Do not edit! // Template: src/qs8-igemm/scalar.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/math.h> #include <xnnpack/gemm.h> void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x4__scalar_fmagic( size_t mr, size_t nc, size_t kc, size_t ks, const int8_t** restrict a, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const int8_t* zero, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); int8_t* c0 = c; do { int32_t vacc0x0 = ((const int32_t*) w)[0]; int32_t vacc0x1 = ((const int32_t*) w)[1]; int32_t vacc0x2 = ((const int32_t*) w)[2]; int32_t vacc0x3 = ((const int32_t*) w)[3]; w = (const void*) ((const int32_t*) w + 4); size_t p = ks; do { const int8_t* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const int8_t*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; do { const int32_t va0 = (int32_t) *a0++; const int32_t vb0 = (int32_t) ((const int8_t*) w)[0]; const int32_t vb1 = (int32_t) ((const int8_t*) w)[1]; const int32_t vb2 = (int32_t) ((const int8_t*) w)[2]; const int32_t vb3 = (int32_t) ((const int8_t*) w)[3]; w = (const void*) ((const int8_t*) w + 4); vacc0x0 += va0 * vb0; vacc0x1 += va0 * vb1; vacc0x2 += va0 * vb2; vacc0x3 += va0 * vb3; k -= sizeof(int8_t); } while (k != 0); p -= 1 * sizeof(void*); } while (p != 0); float vfpacc0x0 = (float) vacc0x0; float vfpacc0x1 = (float) vacc0x1; float vfpacc0x2 = (float) vacc0x2; float vfpacc0x3 = (float) vacc0x3; const float vscale0 = ((const float*) w)[0]; vfpacc0x0 *= vscale0; const float vscale1 = ((const float*) w)[1]; vfpacc0x1 *= vscale1; const float vscale2 = ((const float*) w)[2]; vfpacc0x2 *= vscale2; const float vscale3 = ((const float*) w)[3]; vfpacc0x3 *= vscale3; w = (const void*) ((const float*) w + 4); const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point; vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point); vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point); vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point); vfpacc0x3 = math_max_f32(vfpacc0x3, voutput_min_less_zero_point); const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point; vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point); vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point); vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point); vfpacc0x3 = math_min_f32(vfpacc0x3, voutput_max_less_zero_point); const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias; vfpacc0x0 += vmagic_bias; vfpacc0x1 += vmagic_bias; vfpacc0x2 += vmagic_bias; vfpacc0x3 += vmagic_bias; const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point; int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0) - vmagic_bias_less_output_zero_point; int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1) - vmagic_bias_less_output_zero_point; int32_t vout0x2 = (int32_t) float_as_uint32(vfpacc0x2) - vmagic_bias_less_output_zero_point; int32_t vout0x3 = (int32_t) float_as_uint32(vfpacc0x3) - 
vmagic_bias_less_output_zero_point; if XNN_LIKELY(nc >= 4) { c0[0] = (int8_t) vout0x0; c0[1] = (int8_t) vout0x1; c0[2] = (int8_t) vout0x2; c0[3] = (int8_t) vout0x3; c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); a = (const int8_t**restrict) ((uintptr_t) a - ks); nc -= 4; } else { if (nc & 2) { c0[0] = (int8_t) vout0x0; c0[1] = (int8_t) vout0x1; vout0x0 = vout0x2; c0 += 2; } if (nc & 1) { c0[0] = (int8_t) vout0x0; } nc = 0; } } while (nc != 0); }
file_length: 4,522
avg_line_length: 30.852113
max_line_length: 116
extension_type: c
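The pointer walk in the scalar 1x4 kernels above implies the packed-weights layout they expect: for each group of four output channels, four int32 biases, then kc steps of four int8 weights, then four float per-channel scales. A hedged packing sketch derived from that walk (ref_pack_w_1x4 is illustrative, not one of XNNPACK's xnn_pack_* routines; it assumes nc is a multiple of 4 and row-major [nc][kc] source weights, whereas the real packers also handle remainder channels):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static size_t ref_pack_w_1x4(
    size_t nc /* multiple of 4 */, size_t kc,
    const int32_t* bias, const int8_t* weights /* [nc][kc] */,
    const float* scales, void* packed)
{
  uint8_t* out = (uint8_t*) packed;
  for (size_t n = 0; n < nc; n += 4) {
    memcpy(out, &bias[n], 4 * sizeof(int32_t));   /* 4 int32 biases */
    out += 4 * sizeof(int32_t);
    for (size_t k = 0; k < kc; k++) {             /* kc steps of 4 int8 weights */
      for (size_t j = 0; j < 4; j++) {
        *out++ = (uint8_t) weights[(n + j) * kc + k];
      }
    }
    memcpy(out, &scales[n], 4 * sizeof(float));   /* 4 per-channel scales */
    out += 4 * sizeof(float);
  }
  return (size_t) (out - (uint8_t*) packed);      /* bytes written */
}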
XNNPACK
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x4-minmax-fp32-scalar-imagic.c
// Auto-generated file. Do not edit! // Template: src/qs8-igemm/scalar.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/math.h> #include <xnnpack/gemm.h> void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x4__scalar_imagic( size_t mr, size_t nc, size_t kc, size_t ks, const int8_t** restrict a, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const int8_t* zero, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 1); assert(nc != 0); assert(kc != 0); assert(ks != 0); assert(ks % (1 * sizeof(void*)) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); int8_t* c0 = c; do { int32_t vacc0x0 = ((const int32_t*) w)[0]; int32_t vacc0x1 = ((const int32_t*) w)[1]; int32_t vacc0x2 = ((const int32_t*) w)[2]; int32_t vacc0x3 = ((const int32_t*) w)[3]; w = (const void*) ((const int32_t*) w + 4); size_t p = ks; do { const int8_t* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const int8_t*) ((uintptr_t) a0 + a_offset); } a += 1; size_t k = kc; do { const int32_t va0 = (int32_t) *a0++; const int32_t vb0 = (int32_t) ((const int8_t*) w)[0]; const int32_t vb1 = (int32_t) ((const int8_t*) w)[1]; const int32_t vb2 = (int32_t) ((const int8_t*) w)[2]; const int32_t vb3 = (int32_t) ((const int8_t*) w)[3]; w = (const void*) ((const int8_t*) w + 4); vacc0x0 += va0 * vb0; vacc0x1 += va0 * vb1; vacc0x2 += va0 * vb2; vacc0x3 += va0 * vb3; k -= sizeof(int8_t); } while (k != 0); p -= 1 * sizeof(void*); } while (p != 0); float vfpacc0x0 = (float) vacc0x0; float vfpacc0x1 = (float) vacc0x1; float vfpacc0x2 = (float) vacc0x2; float vfpacc0x3 = (float) vacc0x3; const float vscale0 = ((const float*) w)[0]; vfpacc0x0 *= vscale0; const float vscale1 = ((const float*) w)[1]; vfpacc0x1 *= vscale1; const float vscale2 = ((const float*) w)[2]; vfpacc0x2 *= vscale2; const float vscale3 = ((const float*) w)[3]; vfpacc0x3 *= vscale3; w = (const void*) ((const float*) w + 4); const float vmagic_bias = params->fp32_scalar_imagic.magic_bias; vfpacc0x0 += vmagic_bias; vfpacc0x1 += vmagic_bias; vfpacc0x2 += vmagic_bias; vfpacc0x3 += vmagic_bias; int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0); int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1); int32_t vout0x2 = (int32_t) float_as_uint32(vfpacc0x2); int32_t vout0x3 = (int32_t) float_as_uint32(vfpacc0x3); const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min; vout0x0 = math_max_s32(vout0x0, vmagic_min); vout0x1 = math_max_s32(vout0x1, vmagic_min); vout0x2 = math_max_s32(vout0x2, vmagic_min); vout0x3 = math_max_s32(vout0x3, vmagic_min); const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max; vout0x0 = math_min_s32(vout0x0, vmagic_max); vout0x1 = math_min_s32(vout0x1, vmagic_max); vout0x2 = math_min_s32(vout0x2, vmagic_max); vout0x3 = math_min_s32(vout0x3, vmagic_max); const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point; vout0x0 -= vmagic_bias_less_zero_point; vout0x1 -= vmagic_bias_less_zero_point; vout0x2 -= vmagic_bias_less_zero_point; vout0x3 -= vmagic_bias_less_zero_point; if XNN_LIKELY(nc >= 4) { c0[0] = (int8_t) vout0x0; c0[1] = (int8_t) vout0x1; c0[2] = (int8_t) vout0x2; c0[3] = (int8_t) vout0x3; c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); a = (const int8_t**restrict) 
((uintptr_t) a - ks); nc -= 4; } else { if (nc & 2) { c0[0] = (int8_t) vout0x0; c0[1] = (int8_t) vout0x1; vout0x0 = vout0x2; c0 += 2; } if (nc & 1) { c0[0] = (int8_t) vout0x0; } nc = 0; } } while (nc != 0); }
file_length: 4,305
avg_line_length: 28.292517
max_line_length: 102
extension_type: c
XNNPACK
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x4-minmax-fp32-scalar-lrintf.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/scalar.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <math.h>

#include <xnnpack/math.h>
#include <xnnpack/gemm.h>


void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x4__scalar_lrintf(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (1 * sizeof(void*)) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  int8_t* c0 = c;

  do {
    // Initialize the 4 per-channel accumulators from the packed bias.
    int32_t vacc0x0 = ((const int32_t*) w)[0];
    int32_t vacc0x1 = ((const int32_t*) w)[1];
    int32_t vacc0x2 = ((const int32_t*) w)[2];
    int32_t vacc0x3 = ((const int32_t*) w)[3];
    w = (const void*) ((const int32_t*) w + 4);

    size_t p = ks;
    do {
      // Indirection: fetch the next input row pointer; rows equal to `zero`
      // are padding and must not be shifted by a_offset.
      const int8_t* restrict a0 = a[0];
      assert(a0 != NULL);
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      a += 1;

      // Multiply-accumulate over the reduction dimension, 4 output channels at a time.
      size_t k = kc;
      do {
        const int32_t va0 = (int32_t) *a0++;

        const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
        const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
        const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
        const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
        w = (const void*) ((const int8_t*) w + 4);

        vacc0x0 += va0 * vb0;
        vacc0x1 += va0 * vb1;
        vacc0x2 += va0 * vb2;
        vacc0x3 += va0 * vb3;

        k -= sizeof(int8_t);
      } while (k != 0);
      p -= 1 * sizeof(void*);
    } while (p != 0);

    // Requantization: apply the per-channel fp32 scales...
    float vfpacc0x0 = (float) vacc0x0;
    float vfpacc0x1 = (float) vacc0x1;
    float vfpacc0x2 = (float) vacc0x2;
    float vfpacc0x3 = (float) vacc0x3;

    const float vscale0 = ((const float*) w)[0];
    vfpacc0x0 *= vscale0;
    const float vscale1 = ((const float*) w)[1];
    vfpacc0x1 *= vscale1;
    const float vscale2 = ((const float*) w)[2];
    vfpacc0x2 *= vscale2;
    const float vscale3 = ((const float*) w)[3];
    vfpacc0x3 *= vscale3;
    w = (const void*) ((const float*) w + 4);

    // ...clamp against the zero-point-biased output bounds...
    const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
    vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
    vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
    vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
    vfpacc0x3 = math_max_f32(vfpacc0x3, voutput_min_less_zero_point);

    const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
    vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
    vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
    vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
    vfpacc0x3 = math_min_f32(vfpacc0x3, voutput_max_less_zero_point);

    // ...round to nearest with lrintf(), then re-add the output zero point.
    const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
    const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
    const int32_t vrndacc0x2 = (int32_t) lrintf(vfpacc0x2);
    const int32_t vrndacc0x3 = (int32_t) lrintf(vfpacc0x3);

    const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
    int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
    int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
    int32_t vout0x2 = vrndacc0x2 + voutput_zero_point;
    int32_t vout0x3 = vrndacc0x3 + voutput_zero_point;

    // Store a full 4-wide tile, or handle the 1-3 column remainder.
    if XNN_LIKELY(nc >= 4) {
      c0[0] = (int8_t) vout0x0;
      c0[1] = (int8_t) vout0x1;
      c0[2] = (int8_t) vout0x2;
      c0[3] = (int8_t) vout0x3;

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 4;
    } else {
      if (nc & 2) {
        c0[0] = (int8_t) vout0x0;
        c0[1] = (int8_t) vout0x1;
        vout0x0 = vout0x2;
        c0 += 2;
      }
      if (nc & 1) {
        c0[0] = (int8_t) vout0x0;
      }

      nc = 0;
    }
  } while (nc != 0);
}
4,391
29.929577
100
c
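The requantization tail of the lrintf kernel above is easiest to follow on a single value. Below is a minimal standalone sketch of the same scheme; the accumulator, scale, and zero point are made-up example values, and fmaxf/fminf stand in for XNNPACK's math_max_f32/math_min_f32 helpers:

#include <math.h>
#include <stdint.h>
#include <stdio.h>

// Hypothetical standalone demo of the fp32 requantization used in the kernel:
// scale the int32 accumulator, clamp in float space against zero-point-biased
// bounds, round with lrintf(), then re-add the output zero point.
int main(void) {
  const int32_t acc = 1234;       // example int32 accumulator
  const float scale = 0.0125f;    // example per-channel scale
  const int32_t zero_point = -1;  // example output zero point

  // Clamp bounds are pre-biased by the zero point, as in the kernel params.
  const float out_min_less_zp = -128.0f - (float) zero_point;
  const float out_max_less_zp = 127.0f - (float) zero_point;

  float fpacc = (float) acc * scale;
  fpacc = fmaxf(fpacc, out_min_less_zp);
  fpacc = fminf(fpacc, out_max_less_zp);

  const int8_t out = (int8_t) ((int32_t) lrintf(fpacc) + zero_point);
  printf("%d\n", out);  // 1234 * 0.0125 = 15.425 -> 15, + (-1) = 14
  return 0;
}

Clamping against zero-point-biased bounds before rounding guarantees the final addition of the zero point cannot push the result outside the int8 range, so the store can truncate without a second clamp.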
XNNPACK
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x4-minmax-fp32-wasm-fmagic.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/scalar.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xnnpack/math.h>
#include <xnnpack/gemm.h>


void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x4__wasm_fmagic(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (1 * sizeof(void*)) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  int8_t* c0 = c;

  do {
    // Initialize the 4 per-channel accumulators from the packed bias.
    int32_t vacc0x0 = ((const int32_t*) w)[0];
    int32_t vacc0x1 = ((const int32_t*) w)[1];
    int32_t vacc0x2 = ((const int32_t*) w)[2];
    int32_t vacc0x3 = ((const int32_t*) w)[3];
    w = (const void*) ((const int32_t*) w + 4);

    size_t p = ks;
    do {
      // Indirection: fetch the next input row pointer; rows equal to `zero`
      // are padding and must not be shifted by a_offset.
      const int8_t* restrict a0 = a[0];
      assert(a0 != NULL);
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      a += 1;

      // Multiply-accumulate over the reduction dimension, 4 output channels at a time.
      size_t k = kc;
      do {
        const int32_t va0 = (int32_t) *a0++;

        const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
        const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
        const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
        const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
        w = (const void*) ((const int8_t*) w + 4);

        vacc0x0 += va0 * vb0;
        vacc0x1 += va0 * vb1;
        vacc0x2 += va0 * vb2;
        vacc0x3 += va0 * vb3;

        k -= sizeof(int8_t);
      } while (k != 0);
      p -= 1 * sizeof(void*);
    } while (p != 0);

    // Requantization: apply the per-channel fp32 scales...
    float vfpacc0x0 = (float) vacc0x0;
    float vfpacc0x1 = (float) vacc0x1;
    float vfpacc0x2 = (float) vacc0x2;
    float vfpacc0x3 = (float) vacc0x3;

    const float vscale0 = ((const float*) w)[0];
    vfpacc0x0 *= vscale0;
    const float vscale1 = ((const float*) w)[1];
    vfpacc0x1 *= vscale1;
    const float vscale2 = ((const float*) w)[2];
    vfpacc0x2 *= vscale2;
    const float vscale3 = ((const float*) w)[3];
    vfpacc0x3 *= vscale3;
    w = (const void*) ((const float*) w + 4);

    // ...clamp in float space using the WAsm min/max builtins...
    const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
    vfpacc0x0 = __builtin_wasm_max_f32(vfpacc0x0, voutput_min_less_zero_point);
    vfpacc0x1 = __builtin_wasm_max_f32(vfpacc0x1, voutput_min_less_zero_point);
    vfpacc0x2 = __builtin_wasm_max_f32(vfpacc0x2, voutput_min_less_zero_point);
    vfpacc0x3 = __builtin_wasm_max_f32(vfpacc0x3, voutput_min_less_zero_point);

    const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
    vfpacc0x0 = __builtin_wasm_min_f32(vfpacc0x0, voutput_max_less_zero_point);
    vfpacc0x1 = __builtin_wasm_min_f32(vfpacc0x1, voutput_max_less_zero_point);
    vfpacc0x2 = __builtin_wasm_min_f32(vfpacc0x2, voutput_max_less_zero_point);
    vfpacc0x3 = __builtin_wasm_min_f32(vfpacc0x3, voutput_max_less_zero_point);

    // ..."fmagic" rounding: after adding the magic bias, the rounded result
    // sits in the low mantissa bits; a bit-cast and an integer subtraction
    // recover it with the output zero point already folded in.
    const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
    vfpacc0x0 += vmagic_bias;
    vfpacc0x1 += vmagic_bias;
    vfpacc0x2 += vmagic_bias;
    vfpacc0x3 += vmagic_bias;

    const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
    int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0) - vmagic_bias_less_output_zero_point;
    int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1) - vmagic_bias_less_output_zero_point;
    int32_t vout0x2 = (int32_t) float_as_uint32(vfpacc0x2) - vmagic_bias_less_output_zero_point;
    int32_t vout0x3 = (int32_t) float_as_uint32(vfpacc0x3) - vmagic_bias_less_output_zero_point;

    // Store a full 4-wide tile, or handle the 1-3 column remainder.
    if XNN_LIKELY(nc >= 4) {
      c0[0] = (int8_t) vout0x0;
      c0[1] = (int8_t) vout0x1;
      c0[2] = (int8_t) vout0x2;
      c0[3] = (int8_t) vout0x3;

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 4;
    } else {
      if (nc & 2) {
        c0[0] = (int8_t) vout0x0;
        c0[1] = (int8_t) vout0x1;
        vout0x0 = vout0x2;
        c0 += 2;
      }
      if (nc & 1) {
        c0[0] = (int8_t) vout0x0;
      }

      nc = 0;
    }
  } while (nc != 0);
}
4,600
31.401408
116
c
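The wasm variant above replaces lrintf() with the "fmagic" bit trick. Below is a minimal standalone sketch of that trick with a made-up zero point and input; float_bits here is a plain memcpy bit-cast standing in for XNNPACK's float_as_uint32 helper, and the bias setup mirrors how the kernel's params are assumed to be precomputed:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

// Bit-cast a float to its IEEE-754 representation (strict-aliasing safe).
static uint32_t float_bits(float f) {
  uint32_t u;
  memcpy(&u, &f, sizeof u);
  return u;
}

int main(void) {
  // Adding 2^23 + 2^22 pushes any small float into the band where the float
  // lattice spacing is exactly 1, so the FP add rounds to nearest for free.
  const float magic_bias = 12582912.0f;  // 0x1.8p+23, bits 0x4B400000
  const int32_t output_zero_point = -1;  // example value
  const int32_t magic_bias_less_output_zero_point =
      (int32_t) float_bits(magic_bias) - output_zero_point;

  float vfpacc = -2.3f;  // already clamped to the biased output range
  vfpacc += magic_bias;
  const int32_t vout =
      (int32_t) float_bits(vfpacc) - magic_bias_less_output_zero_point;
  printf("%d\n", vout);  // round(-2.3) + zero_point = -2 + (-1) = -3
  return 0;
}

Because the accumulator was already clamped to the int8 output range, the biased sum stays in [2^23, 2^24), where one float ulp equals 1; the rounded integer therefore lands exactly in the mantissa bits, and a single integer subtraction both removes the bias and folds in the zero point.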