| repo (string, 1–152 chars, nullable) | file (string, 14–221 chars) | code (string, 501–25k chars) | file_length (int64, 501–25k) | avg_line_length (float64, 20–99.5) | max_line_length (int64, 21–134) | extension_type (string, 2 classes) |
|---|---|---|---|---|---|---|
XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x16c8-minmax-rndnu-neon-mlal.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c8-neon-mull.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c8__neon_mlal(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
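// kc is rounded up to a multiple of 8 because both the packed weights and
// the loops below consume K in 8-byte groups; the XNN_OOB_READS annotation
// documents that a0 may be read past kc up to that rounded boundary.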
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
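// Each 16-column tile of the packed weights w begins with 16 int32 bias
// values, one per output channel; load each into lane 0 of a zeroed vector
// so the dot products below accumulate on top of the bias.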
int32x4_t vacc0x0 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x1 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x2 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x3 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x4 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x5 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x6 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x7 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x8 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x9 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x10 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x11 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x12 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x13 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x14 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x15 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
size_t k = kc;
// 2x partial unrolled loop to load 16 bytes at a time using MLA.
while (k >= 16 * sizeof(int8_t)) {
const int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
const int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
const int8x8_t vb0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
const int8x8_t vb1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
const int8x8_t vb2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
const int8x8_t vb3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
const int8x8_t vb4x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
const int8x8_t vb5x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
const int8x8_t vb6x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
const int8x8_t vb7x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
const int8x8_t vb8x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
const int8x8_t vb9x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
const int8x8_t vb10x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
const int8x8_t vb11x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
const int8x8_t vb12x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
const int8x8_t vb13x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
const int8x8_t vb14x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
const int8x8_t vb15x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
const int8x8_t vb0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
int16x8_t vprod0x0 = vmull_s8(vb0x0, va0x0);
vprod0x0 = vmlal_s8(vprod0x0, vb0x1, va0x1);
vacc0x0 = vpadalq_s16(vacc0x0, vprod0x0);
const int8x8_t vb1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
int16x8_t vprod0x1 = vmull_s8(vb1x0, va0x0);
vprod0x1 = vmlal_s8(vprod0x1, vb1x1, va0x1);
vacc0x1 = vpadalq_s16(vacc0x1, vprod0x1);
const int8x8_t vb2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
int16x8_t vprod0x2 = vmull_s8(vb2x0, va0x0);
vprod0x2 = vmlal_s8(vprod0x2, vb2x1, va0x1);
vacc0x2 = vpadalq_s16(vacc0x2, vprod0x2);
const int8x8_t vb3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
int16x8_t vprod0x3 = vmull_s8(vb3x0, va0x0);
vprod0x3 = vmlal_s8(vprod0x3, vb3x1, va0x1);
vacc0x3 = vpadalq_s16(vacc0x3, vprod0x3);
const int8x8_t vb4x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
int16x8_t vprod0x4 = vmull_s8(vb4x0, va0x0);
vprod0x4 = vmlal_s8(vprod0x4, vb4x1, va0x1);
vacc0x4 = vpadalq_s16(vacc0x4, vprod0x4);
const int8x8_t vb5x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
int16x8_t vprod0x5 = vmull_s8(vb5x0, va0x0);
vprod0x5 = vmlal_s8(vprod0x5, vb5x1, va0x1);
vacc0x5 = vpadalq_s16(vacc0x5, vprod0x5);
const int8x8_t vb6x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
int16x8_t vprod0x6 = vmull_s8(vb6x0, va0x0);
vprod0x6 = vmlal_s8(vprod0x6, vb6x1, va0x1);
vacc0x6 = vpadalq_s16(vacc0x6, vprod0x6);
const int8x8_t vb7x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
int16x8_t vprod0x7 = vmull_s8(vb7x0, va0x0);
vprod0x7 = vmlal_s8(vprod0x7, vb7x1, va0x1);
vacc0x7 = vpadalq_s16(vacc0x7, vprod0x7);
const int8x8_t vb8x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
int16x8_t vprod0x8 = vmull_s8(vb8x0, va0x0);
vprod0x8 = vmlal_s8(vprod0x8, vb8x1, va0x1);
vacc0x8 = vpadalq_s16(vacc0x8, vprod0x8);
const int8x8_t vb9x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
int16x8_t vprod0x9 = vmull_s8(vb9x0, va0x0);
vprod0x9 = vmlal_s8(vprod0x9, vb9x1, va0x1);
vacc0x9 = vpadalq_s16(vacc0x9, vprod0x9);
const int8x8_t vb10x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
int16x8_t vprod0x10 = vmull_s8(vb10x0, va0x0);
vprod0x10 = vmlal_s8(vprod0x10, vb10x1, va0x1);
vacc0x10 = vpadalq_s16(vacc0x10, vprod0x10);
const int8x8_t vb11x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
int16x8_t vprod0x11 = vmull_s8(vb11x0, va0x0);
vprod0x11 = vmlal_s8(vprod0x11, vb11x1, va0x1);
vacc0x11 = vpadalq_s16(vacc0x11, vprod0x11);
const int8x8_t vb12x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
int16x8_t vprod0x12 = vmull_s8(vb12x0, va0x0);
vprod0x12 = vmlal_s8(vprod0x12, vb12x1, va0x1);
vacc0x12 = vpadalq_s16(vacc0x12, vprod0x12);
const int8x8_t vb13x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
int16x8_t vprod0x13 = vmull_s8(vb13x0, va0x0);
vprod0x13 = vmlal_s8(vprod0x13, vb13x1, va0x1);
vacc0x13 = vpadalq_s16(vacc0x13, vprod0x13);
const int8x8_t vb14x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
int16x8_t vprod0x14 = vmull_s8(vb14x0, va0x0);
vprod0x14 = vmlal_s8(vprod0x14, vb14x1, va0x1);
vacc0x14 = vpadalq_s16(vacc0x14, vprod0x14);
const int8x8_t vb15x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
int16x8_t vprod0x15 = vmull_s8(vb15x0, va0x0);
vprod0x15 = vmlal_s8(vprod0x15, vb15x1, va0x1);
vacc0x15 = vpadalq_s16(vacc0x15, vprod0x15);
k -= 16 * sizeof(int8_t);
}
// Handle 8 bytes at a time using MUL.
if (k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 += 8;
const int8x8_t vb0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x0 = vmull_s8(vb0, va0);
vacc0x0 = vpadalq_s16(vacc0x0, vprod0x0);
const int8x8_t vb1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x1 = vmull_s8(vb1, va0);
vacc0x1 = vpadalq_s16(vacc0x1, vprod0x1);
const int8x8_t vb2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x2 = vmull_s8(vb2, va0);
vacc0x2 = vpadalq_s16(vacc0x2, vprod0x2);
const int8x8_t vb3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x3 = vmull_s8(vb3, va0);
vacc0x3 = vpadalq_s16(vacc0x3, vprod0x3);
const int8x8_t vb4 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x4 = vmull_s8(vb4, va0);
vacc0x4 = vpadalq_s16(vacc0x4, vprod0x4);
const int8x8_t vb5 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x5 = vmull_s8(vb5, va0);
vacc0x5 = vpadalq_s16(vacc0x5, vprod0x5);
const int8x8_t vb6 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x6 = vmull_s8(vb6, va0);
vacc0x6 = vpadalq_s16(vacc0x6, vprod0x6);
const int8x8_t vb7 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x7 = vmull_s8(vb7, va0);
vacc0x7 = vpadalq_s16(vacc0x7, vprod0x7);
const int8x8_t vb8 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x8 = vmull_s8(vb8, va0);
vacc0x8 = vpadalq_s16(vacc0x8, vprod0x8);
const int8x8_t vb9 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x9 = vmull_s8(vb9, va0);
vacc0x9 = vpadalq_s16(vacc0x9, vprod0x9);
const int8x8_t vb10 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x10 = vmull_s8(vb10, va0);
vacc0x10 = vpadalq_s16(vacc0x10, vprod0x10);
const int8x8_t vb11 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x11 = vmull_s8(vb11, va0);
vacc0x11 = vpadalq_s16(vacc0x11, vprod0x11);
const int8x8_t vb12 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x12 = vmull_s8(vb12, va0);
vacc0x12 = vpadalq_s16(vacc0x12, vprod0x12);
const int8x8_t vb13 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x13 = vmull_s8(vb13, va0);
vacc0x13 = vpadalq_s16(vacc0x13, vprod0x13);
const int8x8_t vb14 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x14 = vmull_s8(vb14, va0);
vacc0x14 = vpadalq_s16(vacc0x14, vprod0x14);
const int8x8_t vb15 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x15 = vmull_s8(vb15, va0);
vacc0x15 = vpadalq_s16(vacc0x15, vprod0x15);
k -= 8 * sizeof(int8_t);
}
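// Each vacc0xN now holds four partial sums for output channel N. Reduce them
// horizontally into four vectors of four channel totals: AArch64 uses the
// full-width pairwise add VPADDQ, while AArch32 emulates it with
// vget_low/vget_high, vadd, and the 64-bit vpadd.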
#if XNN_ARCH_ARM64
const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1);
const int32x4_t vsum0x23 = vpaddq_s32(vacc0x2, vacc0x3);
const int32x4_t vsum0x45 = vpaddq_s32(vacc0x4, vacc0x5);
const int32x4_t vsum0x67 = vpaddq_s32(vacc0x6, vacc0x7);
const int32x4_t vsum0x89 = vpaddq_s32(vacc0x8, vacc0x9);
const int32x4_t vsum0xAB = vpaddq_s32(vacc0x10, vacc0x11);
const int32x4_t vsum0xCD = vpaddq_s32(vacc0x12, vacc0x13);
const int32x4_t vsum0xEF = vpaddq_s32(vacc0x14, vacc0x15);
int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23);
int32x4_t vacc0x4567 = vpaddq_s32(vsum0x45, vsum0x67);
int32x4_t vacc0x89AB = vpaddq_s32(vsum0x89, vsum0xAB);
int32x4_t vacc0xCDEF = vpaddq_s32(vsum0xCD, vsum0xEF);
#else
const int32x2_t vpsum0x0 = vadd_s32(vget_low_s32(vacc0x0), vget_high_s32(vacc0x0));
const int32x2_t vpsum0x1 = vadd_s32(vget_low_s32(vacc0x1), vget_high_s32(vacc0x1));
const int32x2_t vpsum0x2 = vadd_s32(vget_low_s32(vacc0x2), vget_high_s32(vacc0x2));
const int32x2_t vpsum0x3 = vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3));
const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1);
const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3);
int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23 );
const int32x2_t vpsum0x4 = vadd_s32(vget_low_s32(vacc0x4), vget_high_s32(vacc0x4));
const int32x2_t vpsum0x5 = vadd_s32(vget_low_s32(vacc0x5), vget_high_s32(vacc0x5));
const int32x2_t vpsum0x6 = vadd_s32(vget_low_s32(vacc0x6), vget_high_s32(vacc0x6));
const int32x2_t vpsum0x7 = vadd_s32(vget_low_s32(vacc0x7), vget_high_s32(vacc0x7));
const int32x2_t vsum0x45 = vpadd_s32(vpsum0x4, vpsum0x5);
const int32x2_t vsum0x67 = vpadd_s32(vpsum0x6, vpsum0x7);
int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67 );
const int32x2_t vpsum0x8 = vadd_s32(vget_low_s32(vacc0x8), vget_high_s32(vacc0x8));
const int32x2_t vpsum0x9 = vadd_s32(vget_low_s32(vacc0x9), vget_high_s32(vacc0x9));
const int32x2_t vpsum0xA = vadd_s32(vget_low_s32(vacc0x10), vget_high_s32(vacc0x10));
const int32x2_t vpsum0xB = vadd_s32(vget_low_s32(vacc0x11), vget_high_s32(vacc0x11));
const int32x2_t vsum0x89 = vpadd_s32(vpsum0x8, vpsum0x9);
const int32x2_t vsum0xAB = vpadd_s32(vpsum0xA, vpsum0xB);
int32x4_t vacc0x89AB = vcombine_s32(vsum0x89, vsum0xAB );
const int32x2_t vpsum0xC = vadd_s32(vget_low_s32(vacc0x12), vget_high_s32(vacc0x12));
const int32x2_t vpsum0xD = vadd_s32(vget_low_s32(vacc0x13), vget_high_s32(vacc0x13));
const int32x2_t vpsum0xE = vadd_s32(vget_low_s32(vacc0x14), vget_high_s32(vacc0x14));
const int32x2_t vpsum0xF = vadd_s32(vget_low_s32(vacc0x15), vget_high_s32(vacc0x15));
const int32x2_t vsum0xCD = vpadd_s32(vpsum0xC, vpsum0xD);
const int32x2_t vsum0xEF = vpadd_s32(vpsum0xE, vpsum0xF);
int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF );
#endif
const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
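// rndnu requantization: vqshlq_s32 and vrshlq_s32 shift left by a signed
// per-lane amount, so negative pre/post shift values realize a saturating and
// a rounding right shift around the saturating doubling multiply-high
// (vqdmulhq_s32) against the fixed-point multiplier.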
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
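// Narrow the 32-bit accumulators to 16 bits with saturation, add the output
// zero point in 16-bit arithmetic, then saturate-narrow again to int8.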
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
if (nc >= 16) {
vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
// Final case where not all of the 16 columns fit in the destination.
int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
if (nc & 8) {
vst1_s8(c0, vout0x01234567); c0 += 8;
vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
}
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 18,207 | 54.512195 | 115 | c |

XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x16c8-minmax-rndnu-neon-mull.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c8-neon-mull.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c8__neon_mull(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x0 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x1 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x2 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x3 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x4 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x5 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x6 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x7 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x8 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x9 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x10 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x11 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x12 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x13 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x14 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x15 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
size_t k = kc;
// Handle 8 bytes at a time using MUL.
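// (This kernel matches the mlal variant above except in the main loop: it
// processes a single 8-byte block of K per iteration with VMULL only, without
// the 2x VMLAL unroll.)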
while (k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 += 8;
const int8x8_t vb0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x0 = vmull_s8(vb0, va0);
vacc0x0 = vpadalq_s16(vacc0x0, vprod0x0);
const int8x8_t vb1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x1 = vmull_s8(vb1, va0);
vacc0x1 = vpadalq_s16(vacc0x1, vprod0x1);
const int8x8_t vb2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x2 = vmull_s8(vb2, va0);
vacc0x2 = vpadalq_s16(vacc0x2, vprod0x2);
const int8x8_t vb3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x3 = vmull_s8(vb3, va0);
vacc0x3 = vpadalq_s16(vacc0x3, vprod0x3);
const int8x8_t vb4 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x4 = vmull_s8(vb4, va0);
vacc0x4 = vpadalq_s16(vacc0x4, vprod0x4);
const int8x8_t vb5 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x5 = vmull_s8(vb5, va0);
vacc0x5 = vpadalq_s16(vacc0x5, vprod0x5);
const int8x8_t vb6 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x6 = vmull_s8(vb6, va0);
vacc0x6 = vpadalq_s16(vacc0x6, vprod0x6);
const int8x8_t vb7 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x7 = vmull_s8(vb7, va0);
vacc0x7 = vpadalq_s16(vacc0x7, vprod0x7);
const int8x8_t vb8 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x8 = vmull_s8(vb8, va0);
vacc0x8 = vpadalq_s16(vacc0x8, vprod0x8);
const int8x8_t vb9 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x9 = vmull_s8(vb9, va0);
vacc0x9 = vpadalq_s16(vacc0x9, vprod0x9);
const int8x8_t vb10 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x10 = vmull_s8(vb10, va0);
vacc0x10 = vpadalq_s16(vacc0x10, vprod0x10);
const int8x8_t vb11 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x11 = vmull_s8(vb11, va0);
vacc0x11 = vpadalq_s16(vacc0x11, vprod0x11);
const int8x8_t vb12 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x12 = vmull_s8(vb12, va0);
vacc0x12 = vpadalq_s16(vacc0x12, vprod0x12);
const int8x8_t vb13 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x13 = vmull_s8(vb13, va0);
vacc0x13 = vpadalq_s16(vacc0x13, vprod0x13);
const int8x8_t vb14 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x14 = vmull_s8(vb14, va0);
vacc0x14 = vpadalq_s16(vacc0x14, vprod0x14);
const int8x8_t vb15 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x15 = vmull_s8(vb15, va0);
vacc0x15 = vpadalq_s16(vacc0x15, vprod0x15);
k -= 8 * sizeof(int8_t);
}
#if XNN_ARCH_ARM64
const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1);
const int32x4_t vsum0x23 = vpaddq_s32(vacc0x2, vacc0x3);
const int32x4_t vsum0x45 = vpaddq_s32(vacc0x4, vacc0x5);
const int32x4_t vsum0x67 = vpaddq_s32(vacc0x6, vacc0x7);
const int32x4_t vsum0x89 = vpaddq_s32(vacc0x8, vacc0x9);
const int32x4_t vsum0xAB = vpaddq_s32(vacc0x10, vacc0x11);
const int32x4_t vsum0xCD = vpaddq_s32(vacc0x12, vacc0x13);
const int32x4_t vsum0xEF = vpaddq_s32(vacc0x14, vacc0x15);
int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23);
int32x4_t vacc0x4567 = vpaddq_s32(vsum0x45, vsum0x67);
int32x4_t vacc0x89AB = vpaddq_s32(vsum0x89, vsum0xAB);
int32x4_t vacc0xCDEF = vpaddq_s32(vsum0xCD, vsum0xEF);
#else
const int32x2_t vpsum0x0 = vadd_s32(vget_low_s32(vacc0x0), vget_high_s32(vacc0x0));
const int32x2_t vpsum0x1 = vadd_s32(vget_low_s32(vacc0x1), vget_high_s32(vacc0x1));
const int32x2_t vpsum0x2 = vadd_s32(vget_low_s32(vacc0x2), vget_high_s32(vacc0x2));
const int32x2_t vpsum0x3 = vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3));
const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1);
const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3);
int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23 );
const int32x2_t vpsum0x4 = vadd_s32(vget_low_s32(vacc0x4), vget_high_s32(vacc0x4));
const int32x2_t vpsum0x5 = vadd_s32(vget_low_s32(vacc0x5), vget_high_s32(vacc0x5));
const int32x2_t vpsum0x6 = vadd_s32(vget_low_s32(vacc0x6), vget_high_s32(vacc0x6));
const int32x2_t vpsum0x7 = vadd_s32(vget_low_s32(vacc0x7), vget_high_s32(vacc0x7));
const int32x2_t vsum0x45 = vpadd_s32(vpsum0x4, vpsum0x5);
const int32x2_t vsum0x67 = vpadd_s32(vpsum0x6, vpsum0x7);
int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67 );
const int32x2_t vpsum0x8 = vadd_s32(vget_low_s32(vacc0x8), vget_high_s32(vacc0x8));
const int32x2_t vpsum0x9 = vadd_s32(vget_low_s32(vacc0x9), vget_high_s32(vacc0x9));
const int32x2_t vpsum0xA = vadd_s32(vget_low_s32(vacc0x10), vget_high_s32(vacc0x10));
const int32x2_t vpsum0xB = vadd_s32(vget_low_s32(vacc0x11), vget_high_s32(vacc0x11));
const int32x2_t vsum0x89 = vpadd_s32(vpsum0x8, vpsum0x9);
const int32x2_t vsum0xAB = vpadd_s32(vpsum0xA, vpsum0xB);
int32x4_t vacc0x89AB = vcombine_s32(vsum0x89, vsum0xAB );
const int32x2_t vpsum0xC = vadd_s32(vget_low_s32(vacc0x12), vget_high_s32(vacc0x12));
const int32x2_t vpsum0xD = vadd_s32(vget_low_s32(vacc0x13), vget_high_s32(vacc0x13));
const int32x2_t vpsum0xE = vadd_s32(vget_low_s32(vacc0x14), vget_high_s32(vacc0x14));
const int32x2_t vpsum0xF = vadd_s32(vget_low_s32(vacc0x15), vget_high_s32(vacc0x15));
const int32x2_t vsum0xCD = vpadd_s32(vpsum0xC, vpsum0xD);
const int32x2_t vsum0xEF = vpadd_s32(vpsum0xE, vpsum0xF);
int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF );
#endif
const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
if (nc >= 16) {
vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
// Final case where not all of the 16 columns fit in the destination.
int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
if (nc & 8) {
vst1_s8(c0, vout0x01234567); c0 += 8;
vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
}
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 12,330 | 50.594142 | 115 | c |

XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x1c4-minmax-fp32-armsimd32.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c4-armsimd32.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_acle.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x1c4__armsimd32(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
kc = round_up_po2(kc, 4 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
const float vscale = params->fp32_armsimd32.scale;
const float vmagic_bias = params->fp32_armsimd32.magic_bias;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
w = (const void*) ((const int32_t*) w + 1);
size_t k = kc;
do {
const int8x4_t va0 = (int8x4_t) unaligned_load_s32(a0); a0 += 4;
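// __sxtb16 sign-extends bytes 0 and 2 of a word into two 16-bit halves, and
// rotating by 8 first exposes bytes 1 and 3; __smlad multiply-accumulates
// both 16-bit pairs at once, so two SMLADs per channel compute a complete
// 4-element int8 dot product.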
const int16x2_t va0c02 = __sxtb16(va0);
const int16x2_t va0c13 = __sxtb16(__ror(va0, 8));
const int8x4_t vb0 = *((const int8x4_t*) w); w = (const int8_t*) w + 4;
const int16x2_t vb0c02 = __sxtb16(vb0);
vacc0x0 = __smlad(va0c02, vb0c02, vacc0x0);
const int16x2_t vb0c13 = __sxtb16(__ror(vb0, 8));
vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);
k -= 4 * sizeof(int8_t);
} while (k != 0);
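// fp32 requantization via the magic-bias trick: after scaling, adding a large
// constant bias forces the rounded result into the float's mantissa bits, so
// reinterpreting the float as an integer and saturating-subtracting
// magic_bias_less_zero_point yields the quantized value with the zero point
// applied; __ssat then saturates it to the int8 range.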
float vfpacc0x0 = (float) vacc0x0;
vfpacc0x0 *= vscale;
vfpacc0x0 += vmagic_bias;
int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0);
const int32_t vmagic_bias_less_zero_point = params->fp32_armsimd32.magic_bias_less_zero_point;
vout0x0 = __qsub(vout0x0, vmagic_bias_less_zero_point);
vout0x0 = __ssat(vout0x0, 8);
const uint32_t vout0 = (uint32_t) vout0x0;
uint32_t vout = vout0;
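// Clamp with the Armv6 SIMD select idiom: __ssub8 performs a per-byte
// subtraction whose only purpose is to set the APSR.GE flags, and __sel then
// picks each result byte from its first or second operand based on those
// flags, giving a per-byte max against output_min and min against output_max.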
const int8x4_t voutput_min = (int8x4_t) params->fp32_armsimd32.output_min;
__ssub8((int8x4_t) vout, voutput_min);
vout = (uint32_t) __sel((uint8x4_t) vout, (uint8x4_t) voutput_min);
const int8x4_t voutput_max = (int8x4_t) params->fp32_armsimd32.output_max;
__ssub8((int8x4_t) vout, voutput_max);
vout = (uint32_t) __sel((uint8x4_t) voutput_max, (uint8x4_t) vout);
*c0 = (int8_t) vout;
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
nc -= 1;
} while (nc != 0);
}
| 2,684 | 26.121212 | 98 | c |

XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x2-minmax-fp32-scalar-fmagic.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32_t vacc0x0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc0x1 = unaligned_indexed_load_s32(w, 1);
w = (const int32_t*) w + 2;
size_t k = kc;
do {
const int32_t va0 = (int32_t) *a0++;
const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
w = (const int8_t*) w + 2;
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
k -= sizeof(int8_t);
} while (k != 0);
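// fmagic requantization: scale, clamp in float space against bounds with the
// output zero point pre-subtracted, add the magic bias so the rounded integer
// lands in the mantissa, then reinterpret as an integer and subtract
// magic_bias_less_output_zero_point.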
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
const float vscale = params->fp32_scalar_fmagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1) - vmagic_bias_less_output_zero_point;
if XNN_LIKELY(nc >= 2) {
c0[0] = (int8_t) vout0x0;
c0[1] = (int8_t) vout0x1;
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
nc -= 2;
} else {
if (nc & 1) {
c0[0] = (int8_t) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
| 2,763 | 27.494845 | 116 | c |

XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x2-minmax-fp32-scalar-imagic.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_imagic(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32_t vacc0x0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc0x1 = unaligned_indexed_load_s32(w, 1);
w = (const int32_t*) w + 2;
size_t k = kc;
do {
const int32_t va0 = (int32_t) *a0++;
const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
w = (const int8_t*) w + 2;
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
k -= sizeof(int8_t);
} while (k != 0);
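// imagic requantization: add the magic bias first, then clamp in integer
// space against magic_min/magic_max (the clamping bounds pre-encoded in
// magic-bias form) before subtracting magic_bias_less_zero_point.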
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
const float vscale = params->fp32_scalar_imagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0);
int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1);
const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
vout0x0 = math_max_s32(vout0x0, vmagic_min);
vout0x1 = math_max_s32(vout0x1, vmagic_min);
const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
vout0x0 = math_min_s32(vout0x0, vmagic_max);
vout0x1 = math_min_s32(vout0x1, vmagic_max);
const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
vout0x0 -= vmagic_bias_less_zero_point;
vout0x1 -= vmagic_bias_less_zero_point;
if XNN_LIKELY(nc >= 2) {
c0[0] = (int8_t) vout0x0;
c0[1] = (int8_t) vout0x1;
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
nc -= 2;
} else {
if (nc & 1) {
c0[0] = (int8_t) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
| 2,616 | 25.17 | 102 | c |

XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x2-minmax-fp32-scalar-lrintf.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32_t vacc0x0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc0x1 = unaligned_indexed_load_s32(w, 1);
w = (const int32_t*) w + 2;
size_t k = kc;
do {
const int32_t va0 = (int32_t) *a0++;
const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
w = (const int8_t*) w + 2;
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
k -= sizeof(int8_t);
} while (k != 0);
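// lrintf requantization: scale and clamp in float space, round to nearest
// with lrintf (ties to even under the default rounding mode), then add the
// output zero point.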
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
const float vscale = params->fp32_scalar_lrintf.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
if XNN_LIKELY(nc >= 2) {
c0[0] = (int8_t) vout0x0;
c0[1] = (int8_t) vout0x1;
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
nc -= 2;
} else {
if (nc & 1) {
c0[0] = (int8_t) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
| 2,656 | 26.391753 | 100 | c |

XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x2-minmax-fp32-wasm-fmagic.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x2__wasm_fmagic(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32_t vacc0x0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc0x1 = unaligned_indexed_load_s32(w, 1);
w = (const int32_t*) w + 2;
size_t k = kc;
do {
const int32_t va0 = (int32_t) *a0++;
const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
w = (const int8_t*) w + 2;
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
k -= sizeof(int8_t);
} while (k != 0);
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
const float vscale = params->fp32_scalar_fmagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = __builtin_wasm_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = __builtin_wasm_max_f32(vfpacc0x1, voutput_min_less_zero_point);
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = __builtin_wasm_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = __builtin_wasm_min_f32(vfpacc0x1, voutput_max_less_zero_point);
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1) - vmagic_bias_less_output_zero_point;
if XNN_LIKELY(nc >= 2) {
c0[0] = (int8_t) vout0x0;
c0[1] = (int8_t) vout0x1;
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
nc -= 2;
} else {
if (nc & 1) {
c0[0] = (int8_t) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
| 2,801 | 27.886598 | 116 | c |

XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x2-minmax-rndnu-scalar.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_1x2__scalar(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32_t vacc0x0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc0x1 = unaligned_indexed_load_s32(w, 1);
w = (const int32_t*) w + 2;
size_t k = kc;
do {
const int32_t va0 = (int32_t) *a0++;
const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
w = (const int8_t*) w + 2;
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
k -= sizeof(int8_t);
} while (k != 0);
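// Scalar rndnu requantization: widen the 32-bit accumulators to 64 bits with
// math_mulext_s32, add the rounding constant, arithmetic-shift right, then
// clamp and add the output zero point.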
const int32_t vmultiplier = params->rndnu_scalar.multiplier;
const int64_t vrounding = params->rndnu_scalar.rounding;
const int64_t vextacc0x0 = math_mulext_s32(vacc0x0, vmultiplier) + vrounding;
const int64_t vextacc0x1 = math_mulext_s32(vacc0x1, vmultiplier) + vrounding;
const uint32_t vshift = params->rndnu_scalar.shift;
int32_t vout0x0 = (int32_t) math_asr_s64(vextacc0x0, vshift);
int32_t vout0x1 = (int32_t) math_asr_s64(vextacc0x1, vshift);
const int32_t voutput_min_less_zero_point = params->rndnu_scalar.output_min_less_zero_point;
vout0x0 = math_max_s32(vout0x0, voutput_min_less_zero_point);
vout0x1 = math_max_s32(vout0x1, voutput_min_less_zero_point);
const int32_t voutput_max_less_zero_point = params->rndnu_scalar.output_max_less_zero_point;
vout0x0 = math_min_s32(vout0x0, voutput_max_less_zero_point);
vout0x1 = math_min_s32(vout0x1, voutput_max_less_zero_point);
const int32_t voutput_zero_point = params->rndnu_scalar.output_zero_point;
vout0x0 += voutput_zero_point;
vout0x1 += voutput_zero_point;
if XNN_LIKELY(nc >= 2) {
c0[0] = (int8_t) vout0x0;
c0[1] = (int8_t) vout0x1;
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
nc -= 2;
} else {
if (nc & 1) {
c0[0] = (int8_t) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
| 2,732 | 27.768421 | 96 | c |

XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x2c4-minmax-fp32-armsimd32.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c4-armsimd32.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_acle.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x2c4__armsimd32(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
kc = round_up_po2(kc, 4 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
const float vscale = params->fp32_armsimd32.scale;
const float vmagic_bias = params->fp32_armsimd32.magic_bias;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
w = (const void*) ((const int32_t*) w + 2);
size_t k = kc;
do {
const int8x4_t va0 = (int8x4_t) unaligned_load_s32(a0); a0 += 4;
const int16x2_t va0c02 = __sxtb16(va0);
const int16x2_t va0c13 = __sxtb16(__ror(va0, 8));
const int8x4_t vb0 = *((const int8x4_t*) w); w = (const int8_t*) w + 4;
const int16x2_t vb0c02 = __sxtb16(vb0);
vacc0x0 = __smlad(va0c02, vb0c02, vacc0x0);
const int16x2_t vb0c13 = __sxtb16(__ror(vb0, 8));
vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);
const int8x4_t vb1 = *((const int8x4_t*) w); w = (const int8_t*) w + 4;
const int16x2_t vb1c02 = __sxtb16(vb1);
vacc0x1 = __smlad(va0c02, vb1c02, vacc0x1);
const int16x2_t vb1c13 = __sxtb16(__ror(vb1, 8));
vacc0x1 = __smlad(va0c13, vb1c13, vacc0x1);
k -= 4 * sizeof(int8_t);
} while (k != 0);
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0);
int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1);
const int32_t vmagic_bias_less_zero_point = params->fp32_armsimd32.magic_bias_less_zero_point;
vout0x0 = __qsub(vout0x0, vmagic_bias_less_zero_point);
vout0x1 = __qsub(vout0x1, vmagic_bias_less_zero_point);
vout0x0 = __ssat(vout0x0, 8);
vout0x1 = __ssat(vout0x1, 8);
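// Pack both saturated 8-bit results into one word so the two lanes can be
// clamped together below and stored with a single 16-bit write.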
const uint32_t vout0 = (uint32_t) (uint8_t) vout0x0 | ((uint32_t) vout0x1 << 8);
uint32_t vout = vout0;
const int8x4_t voutput_min = (int8x4_t) params->fp32_armsimd32.output_min;
__ssub8((int8x4_t) vout, voutput_min);
vout = (uint32_t) __sel((uint8x4_t) vout, (uint8x4_t) voutput_min);
const int8x4_t voutput_max = (int8x4_t) params->fp32_armsimd32.output_max;
__ssub8((int8x4_t) vout, voutput_max);
vout = (uint32_t) __sel((uint8x4_t) voutput_max, (uint8x4_t) vout);
if XNN_LIKELY(nc >= 2) {
unaligned_store_u16(c0, (uint16_t) vout);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
nc -= 2;
} else {
*c0 = (int8_t) vout;
nc = 0;
}
} while (nc != 0);
}
| 3,418 | 27.731092 | 98 | c |

XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x4-minmax-fp32-scalar-fmagic.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
int32_t vacc0x2 = ((const int32_t*) w)[2];
int32_t vacc0x3 = ((const int32_t*) w)[3];
w = (const int32_t*) w + 4;
size_t k = kc;
do {
const int32_t va0 = (int32_t) *a0++;
const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
w = (const int8_t*) w + 4;
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
vacc0x2 += va0 * vb2;
vacc0x3 += va0 * vb3;
k -= sizeof(int8_t);
} while (k != 0);
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
float vfpacc0x2 = (float) vacc0x2;
float vfpacc0x3 = (float) vacc0x3;
const float vscale = params->fp32_scalar_fmagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc0x2 *= vscale;
vfpacc0x3 *= vscale;
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
vfpacc0x3 = math_max_f32(vfpacc0x3, voutput_min_less_zero_point);
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
vfpacc0x3 = math_min_f32(vfpacc0x3, voutput_max_less_zero_point);
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc0x2 += vmagic_bias;
vfpacc0x3 += vmagic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout0x2 = (int32_t) float_as_uint32(vfpacc0x2) - vmagic_bias_less_output_zero_point;
int32_t vout0x3 = (int32_t) float_as_uint32(vfpacc0x3) - vmagic_bias_less_output_zero_point;
if XNN_LIKELY(nc >= 4) {
c0[0] = (int8_t) vout0x0;
c0[1] = (int8_t) vout0x1;
c0[2] = (int8_t) vout0x2;
c0[3] = (int8_t) vout0x3;
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
nc -= 4;
} else {
if (nc & 2) {
c0[0] = (int8_t) vout0x0;
c0[1] = (int8_t) vout0x1;
vout0x0 = vout0x2;
c0 += 2;
}
if (nc & 1) {
c0[0] = (int8_t) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
| 3,850 | 30.565574 | 116 | c |

XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x4-minmax-fp32-scalar-imagic.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_imagic(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
int32_t vacc0x2 = ((const int32_t*) w)[2];
int32_t vacc0x3 = ((const int32_t*) w)[3];
w = (const int32_t*) w + 4;
size_t k = kc;
do {
const int32_t va0 = (int32_t) *a0++;
const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
w = (const int8_t*) w + 4;
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
vacc0x2 += va0 * vb2;
vacc0x3 += va0 * vb3;
k -= sizeof(int8_t);
} while (k != 0);
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
float vfpacc0x2 = (float) vacc0x2;
float vfpacc0x3 = (float) vacc0x3;
const float vscale = params->fp32_scalar_imagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc0x2 *= vscale;
vfpacc0x3 *= vscale;
const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc0x2 += vmagic_bias;
vfpacc0x3 += vmagic_bias;
int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0);
int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1);
int32_t vout0x2 = (int32_t) float_as_uint32(vfpacc0x2);
int32_t vout0x3 = (int32_t) float_as_uint32(vfpacc0x3);
const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
vout0x0 = math_max_s32(vout0x0, vmagic_min);
vout0x1 = math_max_s32(vout0x1, vmagic_min);
vout0x2 = math_max_s32(vout0x2, vmagic_min);
vout0x3 = math_max_s32(vout0x3, vmagic_min);
const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
vout0x0 = math_min_s32(vout0x0, vmagic_max);
vout0x1 = math_min_s32(vout0x1, vmagic_max);
vout0x2 = math_min_s32(vout0x2, vmagic_max);
vout0x3 = math_min_s32(vout0x3, vmagic_max);
const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
vout0x0 -= vmagic_bias_less_zero_point;
vout0x1 -= vmagic_bias_less_zero_point;
vout0x2 -= vmagic_bias_less_zero_point;
vout0x3 -= vmagic_bias_less_zero_point;
if XNN_LIKELY(nc >= 4) {
c0[0] = (int8_t) vout0x0;
c0[1] = (int8_t) vout0x1;
c0[2] = (int8_t) vout0x2;
c0[3] = (int8_t) vout0x3;
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
nc -= 4;
} else {
if (nc & 2) {
c0[0] = (int8_t) vout0x0;
c0[1] = (int8_t) vout0x1;
vout0x0 = vout0x2;
c0 += 2;
}
if (nc & 1) {
c0[0] = (int8_t) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
| 3,633 | 27.614173 | 102 | c |

XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x4-minmax-fp32-scalar-lrintf.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
int32_t vacc0x2 = ((const int32_t*) w)[2];
int32_t vacc0x3 = ((const int32_t*) w)[3];
w = (const int32_t*) w + 4;
size_t k = kc;
do {
const int32_t va0 = (int32_t) *a0++;
const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
w = (const int8_t*) w + 4;
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
vacc0x2 += va0 * vb2;
vacc0x3 += va0 * vb3;
k -= sizeof(int8_t);
} while (k != 0);
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
float vfpacc0x2 = (float) vacc0x2;
float vfpacc0x3 = (float) vacc0x3;
const float vscale = params->fp32_scalar_lrintf.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc0x2 *= vscale;
vfpacc0x3 *= vscale;
const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
vfpacc0x3 = math_max_f32(vfpacc0x3, voutput_min_less_zero_point);
const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
vfpacc0x3 = math_min_f32(vfpacc0x3, voutput_max_less_zero_point);
const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
const int32_t vrndacc0x2 = (int32_t) lrintf(vfpacc0x2);
const int32_t vrndacc0x3 = (int32_t) lrintf(vfpacc0x3);
const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
int32_t vout0x2 = vrndacc0x2 + voutput_zero_point;
int32_t vout0x3 = vrndacc0x3 + voutput_zero_point;
if XNN_LIKELY(nc >= 4) {
c0[0] = (int8_t) vout0x0;
c0[1] = (int8_t) vout0x1;
c0[2] = (int8_t) vout0x2;
c0[3] = (int8_t) vout0x3;
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
nc -= 4;
} else {
if (nc & 2) {
c0[0] = (int8_t) vout0x0;
c0[1] = (int8_t) vout0x1;
vout0x0 = vout0x2;
c0 += 2;
}
if (nc & 1) {
c0[0] = (int8_t) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
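// A minimal, hypothetical driver for the 1x4 scalar-lrintf microkernel above.
// This is a sketch, not XNNPACK API: the helper name and the direct
// initialization of the params fields are assumptions (real code would use
// XNNPACK's packing and params-init helpers); the field names match the reads
// in the kernel body above. The packed-weight layout implied by the kernel's
// pointer walk is, per 4-column group: 4 int32 biases followed by the int8
// weights interleaved 4 columns at a time along k.
static void hypothetical_run_qs8_gemm_1x4(
    size_t n,                // output columns
    size_t k,                // reduction depth
    const int8_t* a,         // 1 x k input row
    const void* packed_w,    // packed bias + weights as described above
    int8_t* c,               // 1 x n output row, contiguous
    float requant_scale, int8_t zero_point, int8_t qmin, int8_t qmax)
{
  union xnn_qs8_conv_minmax_params params;
  params.fp32_scalar_lrintf.scale = requant_scale;
  params.fp32_scalar_lrintf.output_min_less_zero_point =
      (float) ((int32_t) qmin - (int32_t) zero_point);
  params.fp32_scalar_lrintf.output_max_less_zero_point =
      (float) ((int32_t) qmax - (int32_t) zero_point);
  params.fp32_scalar_lrintf.output_zero_point = (int32_t) zero_point;
  // The kernel iterates over all n columns itself (4 per step), so a single
  // call covers the whole row; a_stride and cm_stride are unused when mr == 1.
  xnn_qs8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf(
      /*mr=*/1, /*nc=*/n, /*kc=*/k * sizeof(int8_t),
      a, /*a_stride=*/0, packed_w, c, /*cm_stride=*/0,
      /*cn_stride=*/4 * sizeof(int8_t), &params);
}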
| 3,719 | 29.491803 | 100 | c | XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x4-minmax-fp32-wasm-fmagic.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x4__wasm_fmagic(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
int32_t vacc0x2 = ((const int32_t*) w)[2];
int32_t vacc0x3 = ((const int32_t*) w)[3];
w = (const int32_t*) w + 4;
size_t k = kc;
do {
const int32_t va0 = (int32_t) *a0++;
const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
w = (const int8_t*) w + 4;
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
vacc0x2 += va0 * vb2;
vacc0x3 += va0 * vb3;
k -= sizeof(int8_t);
} while (k != 0);
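    // Requantization (fp32, fmagic variant): after scaling and clamping,
    // adding a large "magic bias" (typically 2**23 + 2**22 for this trick)
    // lands the value in a float range where the low mantissa bits hold the
    // rounded integer, so reinterpreting the bits and subtracting
    // magic_bias_less_output_zero_point yields the quantized result without
    // an explicit float-to-int conversion. The exact constant comes from
    // params and is not shown here.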
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
float vfpacc0x2 = (float) vacc0x2;
float vfpacc0x3 = (float) vacc0x3;
const float vscale = params->fp32_scalar_fmagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc0x2 *= vscale;
vfpacc0x3 *= vscale;
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = __builtin_wasm_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = __builtin_wasm_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = __builtin_wasm_max_f32(vfpacc0x2, voutput_min_less_zero_point);
vfpacc0x3 = __builtin_wasm_max_f32(vfpacc0x3, voutput_min_less_zero_point);
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = __builtin_wasm_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = __builtin_wasm_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = __builtin_wasm_min_f32(vfpacc0x2, voutput_max_less_zero_point);
vfpacc0x3 = __builtin_wasm_min_f32(vfpacc0x3, voutput_max_less_zero_point);
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc0x2 += vmagic_bias;
vfpacc0x3 += vmagic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout0x2 = (int32_t) float_as_uint32(vfpacc0x2) - vmagic_bias_less_output_zero_point;
int32_t vout0x3 = (int32_t) float_as_uint32(vfpacc0x3) - vmagic_bias_less_output_zero_point;
if XNN_LIKELY(nc >= 4) {
c0[0] = (int8_t) vout0x0;
c0[1] = (int8_t) vout0x1;
c0[2] = (int8_t) vout0x2;
c0[3] = (int8_t) vout0x3;
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
nc -= 4;
} else {
if (nc & 2) {
c0[0] = (int8_t) vout0x0;
c0[1] = (int8_t) vout0x1;
vout0x0 = vout0x2;
c0 += 2;
}
if (nc & 1) {
c0[0] = (int8_t) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
| 3,928 | 31.204918 | 116 | c | XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x4-minmax-rndnu-scalar.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_1x4__scalar(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
int32_t vacc0x2 = ((const int32_t*) w)[2];
int32_t vacc0x3 = ((const int32_t*) w)[3];
w = (const int32_t*) w + 4;
size_t k = kc;
do {
const int32_t va0 = (int32_t) *a0++;
const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
w = (const int8_t*) w + 4;
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
vacc0x2 += va0 * vb2;
vacc0x3 += va0 * vb3;
k -= sizeof(int8_t);
} while (k != 0);
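    // Requantization (rndnu variant): widen the int32 accumulator by a
    // fixed-point multiplier into 64 bits, add a rounding term, then
    // arithmetic-shift right, i.e. round to nearest with ties resolved
    // upward ("rndnu").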
const int32_t vmultiplier = params->rndnu_scalar.multiplier;
const int64_t vrounding = params->rndnu_scalar.rounding;
const int64_t vextacc0x0 = math_mulext_s32(vacc0x0, vmultiplier) + vrounding;
const int64_t vextacc0x1 = math_mulext_s32(vacc0x1, vmultiplier) + vrounding;
const int64_t vextacc0x2 = math_mulext_s32(vacc0x2, vmultiplier) + vrounding;
const int64_t vextacc0x3 = math_mulext_s32(vacc0x3, vmultiplier) + vrounding;
const uint32_t vshift = params->rndnu_scalar.shift;
int32_t vout0x0 = (int32_t) math_asr_s64(vextacc0x0, vshift);
int32_t vout0x1 = (int32_t) math_asr_s64(vextacc0x1, vshift);
int32_t vout0x2 = (int32_t) math_asr_s64(vextacc0x2, vshift);
int32_t vout0x3 = (int32_t) math_asr_s64(vextacc0x3, vshift);
const int32_t voutput_min_less_zero_point = params->rndnu_scalar.output_min_less_zero_point;
vout0x0 = math_max_s32(vout0x0, voutput_min_less_zero_point);
vout0x1 = math_max_s32(vout0x1, voutput_min_less_zero_point);
vout0x2 = math_max_s32(vout0x2, voutput_min_less_zero_point);
vout0x3 = math_max_s32(vout0x3, voutput_min_less_zero_point);
const int32_t voutput_max_less_zero_point = params->rndnu_scalar.output_max_less_zero_point;
vout0x0 = math_min_s32(vout0x0, voutput_max_less_zero_point);
vout0x1 = math_min_s32(vout0x1, voutput_max_less_zero_point);
vout0x2 = math_min_s32(vout0x2, voutput_max_less_zero_point);
vout0x3 = math_min_s32(vout0x3, voutput_max_less_zero_point);
const int32_t voutput_zero_point = params->rndnu_scalar.output_zero_point;
vout0x0 += voutput_zero_point;
vout0x1 += voutput_zero_point;
vout0x2 += voutput_zero_point;
vout0x3 += voutput_zero_point;
if XNN_LIKELY(nc >= 4) {
c0[0] = (int8_t) vout0x0;
c0[1] = (int8_t) vout0x1;
c0[2] = (int8_t) vout0x2;
c0[3] = (int8_t) vout0x3;
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
nc -= 4;
} else {
if (nc & 2) {
c0[0] = (int8_t) vout0x0;
c0[1] = (int8_t) vout0x1;
vout0x0 = vout0x2;
c0 += 2;
}
if (nc & 1) {
c0[0] = (int8_t) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
| 3,787 | 31.101695 | 96 | c | XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x4c2-minmax-fp32-avx-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x4c2__avx_ld128(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
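      // Sign-extend the packed int8 weights to int16: the low 8 bytes via
      // _mm_cvtepi8_epi16, the high 8 bytes via unpackhi plus arithmetic
      // shift. Each _mm_madd_epi16 below multiplies one broadcast pair of
      // activations (the "c2" layout) against 4 columns and sums adjacent
      // products into int32 lanes.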
const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
}
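    // Remainder: kc was rounded up to a multiple of 2, so 2, 4, or 6 input
    // bytes may be left here. Each step consumes one 2-element pair; the
    // 8-byte loads may read past the valid data, which is permitted under
    // XNN_OOB_READS.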
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 = (const int8_t*) ((uintptr_t) a0 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
if (k > 2 * sizeof(int8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
if (k > 4 * sizeof(int8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
}
}
}
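    // Requantization: clamp against output_max while still in the float
    // domain (out-of-range floats convert to INT32_MIN in _mm_cvtps_epi32),
    // then convert, add the zero point with saturating int16 arithmetic, and
    // pack to int8; the output_min clamp is applied last, after packing.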
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 4,792 | 31.385135 | 108 | c | XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x4c2-minmax-fp32-avx-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x4c2__avx_ld64(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 = (const int8_t*) ((uintptr_t) a0 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
if (k > 2 * sizeof(int8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
if (k > 4 * sizeof(int8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
}
}
}
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 4,906 | 31.713333 | 108 | c | XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x4c2-minmax-fp32-sse2-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x4c2__sse2_ld128(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
a0 += 8;
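      // SSE2 lacks _mm_cvtepi8_epi16, so sign-extension is done by computing
      // a sign mask (0 > byte) and interleaving it with the data bytes.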
const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
const __m128i vsb01 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb01);
const __m128i vxb0 = _mm_unpacklo_epi8(vb01, vsb01);
const __m128i vxb1 = _mm_unpackhi_epi8(vb01, vsb01);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vsb23 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb23);
const __m128i vxb2 = _mm_unpacklo_epi8(vb23, vsb23);
const __m128i vxb3 = _mm_unpackhi_epi8(vb23, vsb23);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
a0 = (const int8_t*) ((uintptr_t) a0 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_srai_epi16(_mm_unpacklo_epi8(vb0, vb0), 8);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
if (k > 2 * sizeof(int8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb1 = _mm_srai_epi16(_mm_unpacklo_epi8(vb1, vb1), 8);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
if (k > 4 * sizeof(int8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb2 = _mm_srai_epi16(_mm_unpacklo_epi8(vb2, vb2), 8);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
}
}
}
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
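    // SSE2 has no packed signed-byte max (_mm_max_epi8 is SSE4.1), so the
    // output_min clamp is applied to the int16 values before the final pack
    // to int8.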
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
vacc00x0123 = _mm_max_epi16(vacc00x0123, voutput_min);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_cvtsi128_si32(vout);
}
nc = 0;
}
} while (nc != 0);
}
| 5,091 | 32.721854 | 108 | c | XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x4c2-minmax-fp32-sse2-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x4c2__sse2_ld64(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
a0 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_srai_epi16(_mm_unpacklo_epi8(vb0, vb0), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_srai_epi16(_mm_unpacklo_epi8(vb1, vb1), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_srai_epi16(_mm_unpacklo_epi8(vb2, vb2), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_srai_epi16(_mm_unpacklo_epi8(vb3, vb3), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
a0 = (const int8_t*) ((uintptr_t) a0 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_srai_epi16(_mm_unpacklo_epi8(vb0, vb0), 8);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
if (k > 2 * sizeof(int8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb1 = _mm_srai_epi16(_mm_unpacklo_epi8(vb1, vb1), 8);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
if (k > 4 * sizeof(int8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb2 = _mm_srai_epi16(_mm_unpacklo_epi8(vb2, vb2), 8);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
}
}
}
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
vacc00x0123 = _mm_max_epi16(vacc00x0123, voutput_min);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_cvtsi128_si32(vout);
}
nc = 0;
}
} while (nc != 0);
}
| 5,181 | 33.317881 | 108 | c | XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x4c2-minmax-fp32-sse41-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x4c2__sse41_ld128(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 = (const int8_t*) ((uintptr_t) a0 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
if (k > 2 * sizeof(int8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
if (k > 4 * sizeof(int8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
}
}
}
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 4,794 | 31.398649 | 108 | c | XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x4c2-minmax-fp32-sse41-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x4c2__sse41_ld64(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 = (const int8_t*) ((uintptr_t) a0 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
if (k > 2 * sizeof(int8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
if (k > 4 * sizeof(int8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
}
}
}
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 4,908 | 31.726667 | 108 | c | XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x4c2-minmax-fp32-wasmsimd-dot16x2-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2-wasmsimd-dot16x2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x4c2__wasmsimd_dot16x2_ld128(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
v128_t vacc0x0123 = wasm_v128_load(w);
w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const v128_t vxa0 = wasm_i16x8_load8x8((const v128_t*) a0);
a0 += 8;
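      // wasm_i32x4_dot_i16x8 multiplies int16 pairs and sums adjacent
      // products into int32 lanes; the v32x4 shuffle broadcasts one
      // 2-element activation pair (the "c2" layout) across all 4 columns.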
const v128_t vb01 = wasm_v128_load(w);
const v128_t vxb0 = wasm_i16x8_extend_low_i8x16(vb01);
const v128_t vxb1 = wasm_i16x8_extend_high_i8x16(vb01);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0));
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1));
const v128_t vb23 = wasm_v128_load((const int8_t*) w + 16);
const v128_t vxb2 = wasm_i16x8_extend_low_i8x16(vb23);
const v128_t vxb3 = wasm_i16x8_extend_high_i8x16(vb23);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2));
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 3, 3, 3, 3), vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
}
if (k != 0) {
const v128_t vxa0 = wasm_i16x8_load8x8(a0);
a0 = (const int8_t*) ((uintptr_t) a0 + k);
const v128_t vxb0 = wasm_i16x8_load8x8(w);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0));
if (k > 2 * sizeof(int8_t)) {
const v128_t vxb1 = wasm_i16x8_load8x8(w);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1));
if (k > 4 * sizeof(int8_t)) {
const v128_t vxb2 = wasm_i16x8_load8x8(w);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2));
}
}
}
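    // Requantization (fp32 with magic bias): scale in float, add the magic
    // bias so the rounded integer sits in the low bits, clamp the lower
    // bound with an integer max against magic_min, then subtract
    // magic_bias_less_output_zero_point; the upper bound is clamped on the
    // packed int8 result below.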
vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123);
const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min);
const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point);
v128_t vacc00x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc0x0123);
v128_t vout = wasm_i8x16_narrow_i16x8(vacc00x0123, vacc00x0123);
const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
vout = wasm_i8x16_min(vout, voutput_max);
if (nc >= 4) {
wasm_v128_store32_lane(c0, vout, 0);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
wasm_v128_store16_lane(c0, vout, 0);
c0 += 2;
vout = wasm_u32x4_shr(vout, 16);
}
if (nc & 1) {
wasm_v128_store8_lane(c0, vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 4,553 | 30.191781 | 134 | c | XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x4c2-minmax-fp32-wasmsimd-dot16x2-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2-wasmsimd-dot16x2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x4c2__wasmsimd_dot16x2_ld64(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
v128_t vacc0x0123 = wasm_v128_load(w);
w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const v128_t vxa0 = wasm_i16x8_load8x8((const v128_t*) a0);
a0 += 8;
const v128_t vxb0 = wasm_i16x8_load8x8(w);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0));
const v128_t vxb1 = wasm_i16x8_load8x8((const int8_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1));
const v128_t vxb2 = wasm_i16x8_load8x8((const int8_t*) w + 16);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2));
const v128_t vxb3 = wasm_i16x8_load8x8((const int8_t*) w + 24);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 3, 3, 3, 3), vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
}
if (k != 0) {
const v128_t vxa0 = wasm_i16x8_load8x8(a0);
a0 = (const int8_t*) ((uintptr_t) a0 + k);
const v128_t vxb0 = wasm_i16x8_load8x8(w);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0));
if (k > 2 * sizeof(int8_t)) {
const v128_t vxb1 = wasm_i16x8_load8x8(w);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1));
if (k > 4 * sizeof(int8_t)) {
const v128_t vxb2 = wasm_i16x8_load8x8(w);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2));
}
}
}
vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123);
const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min);
const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point);
v128_t vacc00x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc0x0123);
v128_t vout = wasm_i8x16_narrow_i16x8(vacc00x0123, vacc00x0123);
const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
vout = wasm_i8x16_min(vout, voutput_max);
if (nc >= 4) {
wasm_v128_store32_lane(c0, vout, 0);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
wasm_v128_store16_lane(c0, vout, 0);
c0 += 2;
vout = wasm_u32x4_shr(vout, 16);
}
if (nc & 1) {
wasm_v128_store8_lane(c0, vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 4,453 | 29.930556 | 134 | c | XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x4c2-minmax-fp32-xop-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x4c2__xop_ld128(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
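      // XOP's _mm_maddd_epi16 fuses the int16 pairwise multiply, the
      // horizontal add of adjacent products, and the accumulate into a
      // single instruction, replacing the separate _mm_madd_epi16 +
      // _mm_add_epi32 sequence used in the SSE variants.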
const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc0x0123);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc0x0123);
const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc0x0123);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc0x0123);
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 = (const int8_t*) ((uintptr_t) a0 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc0x0123);
if (k > 2 * sizeof(int8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc0x0123);
if (k > 4 * sizeof(int8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc0x0123);
}
}
}
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 4,754 | 30.282895 | 108 | c | XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x4c2-minmax-fp32-xop-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x4c2__xop_ld64(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc0x0123);
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc0x0123);
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc0x0123);
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc0x0123);
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 = (const int8_t*) ((uintptr_t) a0 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc0x0123);
if (k > 2 * sizeof(int8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc0x0123);
if (k > 4 * sizeof(int8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc0x0123);
}
}
}
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 4,868 | 30.616883 | 108 | c | XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x4c2-xw-minmax-fp32-avx.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gemm_xw_minmax_fp32_ukernel_1x4c2__avx(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
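      // "xw" variant: the weights were pre-extended to int16 at packing
      // time, so they are loaded directly with _mm_load_si128 and no
      // sign-extension is needed in the inner loop (note the int16 pointer
      // arithmetic on w).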
const __m128i vxb0 = _mm_load_si128((const __m128i*) w);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
const __m128i vxb1 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 8));
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
const __m128i vxb2 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 16));
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
const __m128i vxb3 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 24));
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
w = (const void*) ((const int16_t*) w + 32);
k -= 8 * sizeof(int8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 = (const int8_t*) ((uintptr_t) a0 + k);
const __m128i vxb0 = _mm_load_si128((const __m128i*) w);
w = (const void*) ((const int16_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
if (k > 2 * sizeof(int8_t)) {
const __m128i vxb1 = _mm_load_si128((const __m128i*) w);
w = (const void*) ((const int16_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
if (k > 4 * sizeof(int8_t)) {
const __m128i vxb2 = _mm_load_si128((const __m128i*) w);
w = (const void*) ((const int16_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
}
}
}
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 4,548 | 30.811189 | 108 | c | XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x4c2-xw-minmax-fp32-sse2.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gemm_xw_minmax_fp32_ukernel_1x4c2__sse2(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
a0 += 8;
const __m128i vxb0 = _mm_load_si128((const __m128i*) w);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
const __m128i vxb1 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 8));
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
const __m128i vxb2 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 16));
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
const __m128i vxb3 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 24));
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
w = (const void*) ((const int16_t*) w + 32);
k -= 8 * sizeof(int8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
a0 = (const int8_t*) ((uintptr_t) a0 + k);
const __m128i vxb0 = _mm_load_si128((const __m128i*) w);
w = (const void*) ((const int16_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
if (k > 2 * sizeof(int8_t)) {
const __m128i vxb1 = _mm_load_si128((const __m128i*) w);
w = (const void*) ((const int16_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
if (k > 4 * sizeof(int8_t)) {
const __m128i vxb2 = _mm_load_si128((const __m128i*) w);
w = (const void*) ((const int16_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
}
}
}
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
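    // SSE2 lacks pmaxsb, so the output min is applied to the int16 lanes before packing to int8.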
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
vacc00x0123 = _mm_max_epi16(vacc00x0123, voutput_min);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_cvtsi128_si32(vout);
}
nc = 0;
}
} while (nc != 0);
}
| 4,655 | 31.333333 | 108 | c |
| XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x4c2-xw-minmax-fp32-sse41.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gemm_xw_minmax_fp32_ukernel_1x4c2__sse41(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
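      // SSE4.1 pmovsxbw sign-extends the 8 int8 activations to int16 in one instruction.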
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vxb0 = _mm_load_si128((const __m128i*) w);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
const __m128i vxb1 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 8));
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
const __m128i vxb2 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 16));
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
const __m128i vxb3 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 24));
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
w = (const void*) ((const int16_t*) w + 32);
k -= 8 * sizeof(int8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 = (const int8_t*) ((uintptr_t) a0 + k);
const __m128i vxb0 = _mm_load_si128((const __m128i*) w);
w = (const void*) ((const int16_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
if (k > 2 * sizeof(int8_t)) {
const __m128i vxb1 = _mm_load_si128((const __m128i*) w);
w = (const void*) ((const int16_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
if (k > 4 * sizeof(int8_t)) {
const __m128i vxb2 = _mm_load_si128((const __m128i*) w);
w = (const void*) ((const int16_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
}
}
}
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 4,550 | 30.825175 | 108 | c |
| XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x4c2-xw-minmax-fp32-wasmsimd-dot16x2.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2-wasmsimd-dot16x2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_xw_minmax_fp32_ukernel_1x4c2__wasmsimd_dot16x2(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
v128_t vacc0x0123 = wasm_v128_load(w);
w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const v128_t vxa0 = wasm_i16x8_load8x8((const v128_t*) a0);
a0 += 8;
const v128_t vxb0 = wasm_v128_load(w);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0));
const v128_t vxb1 = wasm_v128_load((const int16_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1));
const v128_t vxb2 = wasm_v128_load((const int16_t*) w + 16);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2));
const v128_t vxb3 = wasm_v128_load((const int16_t*) w + 24);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 3, 3, 3, 3), vxb3));
w = (const void*) ((const int16_t*) w + 32);
k -= 8 * sizeof(int8_t);
}
if (k != 0) {
const v128_t vxa0 = wasm_i16x8_load8x8(a0);
a0 = (const int8_t*) ((uintptr_t) a0 + k);
const v128_t vxb0 = wasm_v128_load(w);
w = (const void*) ((const int16_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0));
if (k > 2 * sizeof(int8_t)) {
const v128_t vxb1 = wasm_v128_load(w);
w = (const void*) ((const int16_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1));
if (k > 4 * sizeof(int8_t)) {
const v128_t vxb2 = wasm_v128_load(w);
w = (const void*) ((const int16_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2));
}
}
}
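    // Requantize via the magic-bias trick: after scaling, adding the bias leaves the rounded integer in the low mantissa bits; subtracting (bias - zero point) recovers the quantized value.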
vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123);
const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min);
const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point);
v128_t vacc00x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc0x0123);
v128_t vout = wasm_i8x16_narrow_i16x8(vacc00x0123, vacc00x0123);
const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
vout = wasm_i8x16_min(vout, voutput_max);
if (nc >= 4) {
wasm_v128_store32_lane(c0, vout, 0);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
wasm_v128_store16_lane(c0, vout, 0);
c0 += 2;
vout = wasm_u32x4_shr(vout, 16);
}
if (nc & 1) {
wasm_v128_store8_lane(c0, vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 4,430 | 29.770833 | 134 | c |
| XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x4c2-xw-minmax-fp32-xop.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gemm_xw_minmax_fp32_ukernel_1x4c2__xop(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vxb0 = _mm_load_si128((const __m128i*) w);
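      // XOP _mm_maddd_epi16 fuses the int16 pairwise multiply-add with the accumulator addition.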
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc0x0123);
const __m128i vxb1 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 8));
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc0x0123);
const __m128i vxb2 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 16));
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc0x0123);
const __m128i vxb3 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 24));
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc0x0123);
w = (const void*) ((const int16_t*) w + 32);
k -= 8 * sizeof(int8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 = (const int8_t*) ((uintptr_t) a0 + k);
const __m128i vxb0 = _mm_load_si128((const __m128i*) w);
w = (const void*) ((const int16_t*) w + 8);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc0x0123);
if (k > 2 * sizeof(int8_t)) {
const __m128i vxb1 = _mm_load_si128((const __m128i*) w);
w = (const void*) ((const int16_t*) w + 8);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc0x0123);
if (k > 4 * sizeof(int8_t)) {
const __m128i vxb2 = _mm_load_si128((const __m128i*) w);
w = (const void*) ((const int16_t*) w + 8);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc0x0123);
}
}
}
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 4,510 | 29.687075 | 108 | c |
| XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x4c2s4-minmax-fp32-avx-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x4c2s4__avx_ld128(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
do {
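      // c2s4 layout: each madd consumes 2-element K groups; the activations rotate one 32-bit lane between the four B panels.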
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
} while (k != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 3,575 | 29.564103 | 108 | c |
| XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x4c2s4-minmax-fp32-avx-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x4c2s4__avx_ld64(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
do {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
} while (k != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 3,689 | 30.008403 | 108 | c |
| XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x4c2s4-minmax-fp32-sse2-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x4c2s4__sse2_ld128(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
do {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
a0 += 8;
const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
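      // Sign-extend 16 int8 weights at once: build a sign mask with cmpgt(0, b), then unpack low/high halves against it.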
const __m128i vsb01 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb01);
const __m128i vxb0 = _mm_unpacklo_epi8(vb01, vsb01);
const __m128i vxb1 = _mm_unpackhi_epi8(vb01, vsb01);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vsb23 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb23);
const __m128i vxb2 = _mm_unpacklo_epi8(vb23, vsb23);
const __m128i vxb3 = _mm_unpackhi_epi8(vb23, vsb23);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
} while (k != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
vacc00x0123 = _mm_max_epi16(vacc00x0123, voutput_min);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_cvtsi128_si32(vout);
}
nc = 0;
}
} while (nc != 0);
}
| 3,778 | 30.491667 | 108 | c |
| XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x4c2s4-minmax-fp32-sse2-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x4c2s4__sse2_ld64(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
do {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
a0 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_srai_epi16(_mm_unpacklo_epi8(vb0, vb0), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_srai_epi16(_mm_unpacklo_epi8(vb1, vb1), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_srai_epi16(_mm_unpacklo_epi8(vb2, vb2), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_srai_epi16(_mm_unpacklo_epi8(vb3, vb3), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
} while (k != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
vacc00x0123 = _mm_max_epi16(vacc00x0123, voutput_min);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_cvtsi128_si32(vout);
}
nc = 0;
}
} while (nc != 0);
}
| 3,868 | 31.241667 | 108 | c |
| XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x4c2s4-minmax-fp32-sse41-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x4c2s4__sse41_ld128(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
do {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
} while (k != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 3,577 | 29.581197 | 108 | c |
| XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x4c2s4-minmax-fp32-sse41-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x4c2s4__sse41_ld64(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
do {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
} while (k != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 3,691 | 30.02521 | 108 | c |
| XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x4c2s4-minmax-fp32-wasmsimd-dot16x2-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2s4-wasmsimd-dot16x2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x4c2s4__wasmsimd_dot16x2_ld128(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const int8_t* a0 = a;
int8_t* c0 = c;
kc = round_up_po2(kc, 8 * sizeof(int8_t));
do {
v128_t vacc0x0123 = wasm_v128_load(w);
w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
do {
v128_t vxa0 = wasm_i16x8_load8x8((const v128_t*) a0);
a0 += 8;
const v128_t vb01 = wasm_v128_load(w);
const v128_t vxb0 = wasm_i16x8_extend_low_i8x16(vb01);
const v128_t vxb1 = wasm_i16x8_extend_high_i8x16(vb01);
vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb0));
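      // Rotate the activations left by one 32-bit lane (shuffle index 4 wraps to lane 0 of the duplicated operand).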
vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb1));
vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
const v128_t vb23 = wasm_v128_load((const int8_t*) w + 16);
const v128_t vxb2 = wasm_i16x8_extend_low_i8x16(vb23);
const v128_t vxb3 = wasm_i16x8_extend_high_i8x16(vb23);
vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb2));
vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb3));
w = (const int8_t*) w + 32;
k -= 8 * sizeof(int8_t);
} while (k != 0);
vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123);
const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min);
const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point);
v128_t vacc00x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc0x0123);
v128_t vout = wasm_i8x16_narrow_i16x8(vacc00x0123, vacc00x0123);
const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
vout = wasm_i8x16_min(vout, voutput_max);
if (nc >= 4) {
wasm_v128_store32_lane(c0, vout, 0);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
wasm_v128_store16_lane(c0, vout, 0);
c0 += 2;
vout = wasm_u32x4_shr(vout, 16);
}
if (nc & 1) {
wasm_v128_store8_lane(c0, vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 3,589 | 29.168067 | 134 | c |
| XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x4c2s4-minmax-fp32-wasmsimd-dot16x2-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2s4-wasmsimd-dot16x2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x4c2s4__wasmsimd_dot16x2_ld64(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const int8_t* a0 = a;
int8_t* c0 = c;
kc = round_up_po2(kc, 8 * sizeof(int8_t));
do {
v128_t vacc0x0123 = wasm_v128_load(w);
w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
do {
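      // wasm_i16x8_load8x8 loads 8 int8 values and sign-extends them to int16 in a single operation.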
v128_t vxa0 = wasm_i16x8_load8x8((const v128_t*) a0);
a0 += 8;
const v128_t vxb0 = wasm_i16x8_load8x8(w);
vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb0));
vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
const v128_t vxb1 = wasm_i16x8_load8x8((const int8_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb1));
vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
const v128_t vxb2 = wasm_i16x8_load8x8((const int8_t*) w + 16);
vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb2));
vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
const v128_t vxb3 = wasm_i16x8_load8x8((const int8_t*) w + 24);
vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb3));
w = (const int8_t*) w + 32;
k -= 8 * sizeof(int8_t);
} while (k != 0);
vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123);
const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min);
const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point);
v128_t vacc00x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc0x0123);
v128_t vout = wasm_i8x16_narrow_i16x8(vacc00x0123, vacc00x0123);
const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
vout = wasm_i8x16_min(vout, voutput_max);
if (nc >= 4) {
wasm_v128_store32_lane(c0, vout, 0);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
wasm_v128_store16_lane(c0, vout, 0);
c0 += 2;
vout = wasm_u32x4_shr(vout, 16);
}
if (nc & 1) {
wasm_v128_store8_lane(c0, vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 3,489 | 28.82906 | 134 | c |
| XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x4c2s4-minmax-fp32-xop-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x4c2s4__xop_ld128(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
do {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);
vacc0x0123 = _mm_maddd_epi16(vxa0, vxb0, vacc0x0123);
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc0x0123 = _mm_maddd_epi16(vxa0, vxb1, vacc0x0123);
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);
vacc0x0123 = _mm_maddd_epi16(vxa0, vxb2, vacc0x0123);
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc0x0123 = _mm_maddd_epi16(vxa0, vxb3, vacc0x0123);
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
} while (k != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 3,572 | 28.528926 | 108 | c |
| XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x4c2s4-minmax-fp32-xop-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x4c2s4__xop_ld64(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
do {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0123 = _mm_maddd_epi16(vxa0, vxb0, vacc0x0123);
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x0123 = _mm_maddd_epi16(vxa0, vxb1, vacc0x0123);
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x0123 = _mm_maddd_epi16(vxa0, vxb2, vacc0x0123);
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);
vacc0x0123 = _mm_maddd_epi16(vxa0, vxb3, vacc0x0123);
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
} while (k != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 3,686 | 28.97561 | 108 | c |
| XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x4c2s4-xw-minmax-fp32-avx.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gemm_xw_minmax_fp32_ukernel_1x4c2s4__avx(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
do {
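      // XW kernel: weights were pre-extended to int16 at packing time, so each B panel is a plain 128-bit load.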
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vxb0 = _mm_load_si128((const __m128i*) w);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vxb1 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 8));
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vxb2 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 16));
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vxb3 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 24));
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
w = (const void*) ((const int16_t*) w + 32);
k -= 8 * sizeof(int8_t);
} while (k != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 3,487 | 29.330435 | 108 | c |
| XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x4c2s4-xw-minmax-fp32-sse2.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gemm_xw_minmax_fp32_ukernel_1x4c2s4__sse2(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
do {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
a0 += 8;
const __m128i vxb0 = _mm_load_si128((const __m128i*) w);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vxb1 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 8));
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vxb2 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 16));
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vxb3 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 24));
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
w = (const void*) ((const int16_t*) w + 32);
k -= 8 * sizeof(int8_t);
} while (k != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
vacc00x0123 = _mm_max_epi16(vacc00x0123, voutput_min);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_cvtsi128_si32(vout);
}
nc = 0;
}
} while (nc != 0);
}
| 3,570 | 29.784483 | 108 | c |
| XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x4c2s4-xw-minmax-fp32-sse41.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gemm_xw_minmax_fp32_ukernel_1x4c2s4__sse41(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
do {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vxb0 = _mm_load_si128((const __m128i*) w);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vxb1 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 8));
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vxb2 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 16));
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vxb3 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 24));
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
w = (const void*) ((const int16_t*) w + 32);
k -= 8 * sizeof(int8_t);
} while (k != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| file_length: 3,489 | avg_line_length: 29.347826 | max_line_length: 108 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x4c2s4-xw-minmax-fp32-xop.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gemm_xw_minmax_fp32_ukernel_1x4c2s4__xop(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
do {
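      // XOP's _mm_maddd_epi16 fuses the multiply-add-adjacent-pairs and the
      // accumulator add into one instruction, replacing madd + add on SSE.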
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vxb0 = _mm_load_si128((const __m128i*) w);
vacc0x0123 = _mm_maddd_epi16(vxa0, vxb0, vacc0x0123);
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vxb1 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 8));
vacc0x0123 = _mm_maddd_epi16(vxa0, vxb1, vacc0x0123);
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vxb2 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 16));
vacc0x0123 = _mm_maddd_epi16(vxa0, vxb2, vacc0x0123);
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vxb3 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 24));
vacc0x0123 = _mm_maddd_epi16(vxa0, vxb3, vacc0x0123);
w = (const void*) ((const int16_t*) w + 32);
k -= 8 * sizeof(int8_t);
} while (k != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| file_length: 3,484 | avg_line_length: 28.285714 | max_line_length: 108 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x4c8-minmax-fp32-avx-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x4c8__avx_ld128(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
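    // The packed weights start with 4 int32 bias values; load them as the
    // initial per-column accumulators.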
__m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
__m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
__m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
__m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
w = (const int32_t*) w + 4;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vb01 = _mm_load_si128((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
const __m128i vb23 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
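    // Reduce the four per-column accumulators to one vector of 4 column sums
    // with pairwise horizontal adds.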
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
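// Illustrative sketch only (not part of the generated file): one way this
// single-row kernel might be invoked, assuming a, packed weights w, output c,
// and params were prepared by the caller; names and strides are hypothetical.
//   xnn_qs8_gemm_minmax_fp32_ukernel_1x4c8__avx_ld128(
//       /*mr=*/1, /*nc=*/n, /*kc=*/k, a, /*a_stride=*/k, w, c,
//       /*cm_stride=*/n, /*cn_stride=*/4 * sizeof(int8_t), &params);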
| file_length: 3,766 | avg_line_length: 29.626016 | max_line_length: 108 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x4c8-minmax-fp32-avx-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x4c8__avx_ld64(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
__m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
__m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
__m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
__m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
w = (const int32_t*) w + 4;
size_t k = 0;
while (k < kc) {
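      // ld64 variant: each 8-byte weight block is loaded and sign-extended
      // separately, trading wider loads for simpler extension.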
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| file_length: 3,884 | avg_line_length: 29.590551 | max_line_length: 108 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x4c8-minmax-fp32-sse2-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x4c8__sse2_ld128(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
__m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
__m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
__m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
__m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
w = (const int32_t*) w + 4;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
a0 += 8;
const __m128i vb01 = _mm_load_si128((const __m128i*) w);
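        // SSE2 lacks pmovsxbw: build a sign mask with cmpgt(0, b) and
        // interleave it with the bytes to widen int8 to int16.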
const __m128i vsb01 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb01);
const __m128i vxb0 = _mm_unpacklo_epi8(vb01, vsb01);
const __m128i vxb1 = _mm_unpackhi_epi8(vb01, vsb01);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
const __m128i vb23 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vsb23 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb23);
const __m128i vxb2 = _mm_unpacklo_epi8(vb23, vsb23);
const __m128i vxb3 = _mm_unpackhi_epi8(vb23, vsb23);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
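    // SSE2 has no horizontal add; reduce the four accumulators with an
    // unpack/add transpose instead.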
const __m128i vacc0x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x0, vacc0x2), _mm_unpackhi_epi32(vacc0x0, vacc0x2));
const __m128i vacc0x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x1, vacc0x3), _mm_unpackhi_epi32(vacc0x1, vacc0x3));
__m128i vacc0x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x02, vacc0x13), _mm_unpackhi_epi32(vacc0x02, vacc0x13));
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
vacc00x0123 = _mm_max_epi16(vacc00x0123, voutput_min);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_cvtsi128_si32(vout);
}
nc = 0;
}
} while (nc != 0);
}
| file_length: 4,089 | avg_line_length: 32.52459 | max_line_length: 119 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x4c8-minmax-fp32-sse2-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x4c8__sse2_ld64(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
__m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
__m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
__m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
__m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
w = (const int32_t*) w + 4;
size_t k = 0;
while (k < kc) {
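      // Widen by duplicating each byte into both halves of a 16-bit lane and
      // arithmetic-shifting right by 8, which sign-extends without pmovsxbw.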
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
a0 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_srai_epi16(_mm_unpacklo_epi8(vb0, vb0), 8);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_srai_epi16(_mm_unpacklo_epi8(vb1, vb1), 8);
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_srai_epi16(_mm_unpacklo_epi8(vb2, vb2), 8);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_srai_epi16(_mm_unpacklo_epi8(vb3, vb3), 8);
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
const __m128i vacc0x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x0, vacc0x2), _mm_unpackhi_epi32(vacc0x0, vacc0x2));
const __m128i vacc0x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x1, vacc0x3), _mm_unpackhi_epi32(vacc0x1, vacc0x3));
__m128i vacc0x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x02, vacc0x13), _mm_unpackhi_epi32(vacc0x02, vacc0x13));
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
vacc00x0123 = _mm_max_epi16(vacc00x0123, voutput_min);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_cvtsi128_si32(vout);
}
nc = 0;
}
} while (nc != 0);
}
| file_length: 4,183 | avg_line_length: 32.741935 | max_line_length: 119 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x4c8-minmax-fp32-sse41-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x4c8__sse41_ld128(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
__m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
__m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
__m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
__m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
w = (const int32_t*) w + 4;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vb01 = _mm_load_si128((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
const __m128i vb23 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
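    // With SSE4.1 the lower clamp uses pmaxsb after the final int8 pack; the
    // upper bound was already applied in fp32.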
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| file_length: 3,715 | avg_line_length: 30.226891 | max_line_length: 108 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x4c8-minmax-fp32-sse41-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x4c8__sse41_ld64(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
__m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
__m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
__m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
__m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
w = (const int32_t*) w + 4;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
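    // Full groups store 4 bytes; the tail writes 2 bytes then 1, shifting
    // vout right to expose the next lane.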
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| file_length: 3,833 | avg_line_length: 30.170732 | max_line_length: 108 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x4c8-minmax-fp32-ssse3-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <tmmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x4c8__ssse3_ld128(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
__m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
__m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
__m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
__m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
w = (const int32_t*) w + 4;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
a0 += 8;
const __m128i vb01 = _mm_load_si128((const __m128i*) w);
const __m128i vsb01 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb01);
const __m128i vxb0 = _mm_unpacklo_epi8(vb01, vsb01);
const __m128i vxb1 = _mm_unpackhi_epi8(vb01, vsb01);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
const __m128i vb23 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vsb23 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb23);
const __m128i vxb2 = _mm_unpacklo_epi8(vb23, vsb23);
const __m128i vxb3 = _mm_unpackhi_epi8(vb23, vsb23);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
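    // SSSE3 adds phaddd, so the reduction uses pairwise horizontal adds even
    // though the widening still follows the SSE2 sign-mask idiom.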
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
vacc00x0123 = _mm_max_epi16(vacc00x0123, voutput_min);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_cvtsi128_si32(vout);
}
nc = 0;
}
} while (nc != 0);
}
| file_length: 3,917 | avg_line_length: 31.114754 | max_line_length: 108 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x4c8-minmax-fp32-ssse3-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <tmmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x4c8__ssse3_ld64(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
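  // kc is rounded up to a multiple of 8 so the loop always consumes whole
  // 8-byte groups; XNN_OOB_READS documents that the resulting overread of
  // the input is permitted.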
const int8_t* a0 = a;
int8_t* c0 = c;
do {
__m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
__m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
__m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
__m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
w = (const int32_t*) w + 4;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
a0 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_srai_epi16(_mm_unpacklo_epi8(vb0, vb0), 8);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_srai_epi16(_mm_unpacklo_epi8(vb1, vb1), 8);
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_srai_epi16(_mm_unpacklo_epi8(vb2, vb2), 8);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_srai_epi16(_mm_unpacklo_epi8(vb3, vb3), 8);
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
vacc00x0123 = _mm_max_epi16(vacc00x0123, voutput_min);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_cvtsi128_si32(vout);
}
nc = 0;
}
} while (nc != 0);
}
| file_length: 4,011 | avg_line_length: 31.354839 | max_line_length: 108 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x4c8-minmax-fp32-xop-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x4c8__xop_ld128(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
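  // MR == 1: a single row of A and C is processed, so one pointer pair
  // suffices and cm_stride is not used in the main loop.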
do {
__m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
__m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
__m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
__m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
w = (const int32_t*) w + 4;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vb01 = _mm_load_si128((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);
vacc0x0 = _mm_maddd_epi16(vxa0, vxb0, vacc0x0);
vacc0x1 = _mm_maddd_epi16(vxa0, vxb1, vacc0x1);
const __m128i vb23 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);
vacc0x2 = _mm_maddd_epi16(vxa0, vxb2, vacc0x2);
vacc0x3 = _mm_maddd_epi16(vxa0, vxb3, vacc0x3);
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| file_length: 3,710 | avg_line_length: 29.170732 | max_line_length: 108 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x4c8-minmax-fp32-xop-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x4c8__xop_ld64(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
__m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
__m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
__m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
__m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
w = (const int32_t*) w + 4;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0 = _mm_maddd_epi16(vxa0, vxb0, vacc0x0);
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x1 = _mm_maddd_epi16(vxa0, vxb1, vacc0x1);
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x2 = _mm_maddd_epi16(vxa0, vxb2, vacc0x2);
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);
vacc0x3 = _mm_maddd_epi16(vxa0, vxb3, vacc0x3);
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
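    // After each 4-column store, a0 rewinds by kc so the same input row is
    // replayed against the next block of packed weights.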
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| file_length: 3,828 | avg_line_length: 29.149606 | max_line_length: 108 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x4c8-xw-minmax-fp32-avx.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gemm_xw_minmax_fp32_ukernel_1x4c8__avx(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
__m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
__m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
__m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
__m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
w = (const int32_t*) w + 4;
size_t k = 0;
while (k < kc) {
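      // -xw variant: weights were pre-widened to int16 at packing time, so
      // each 16-byte load is ready for pmaddwd without extension.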
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vxb0 = _mm_load_si128((const __m128i*) w);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
const __m128i vxb1 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 8));
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
const __m128i vxb2 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 16));
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
const __m128i vxb3 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 24));
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
w = (const void*) ((const int16_t*) w + 32);
k += 8 * sizeof(int8_t);
}
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| file_length: 3,682 | avg_line_length: 28.943089 | max_line_length: 108 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x4c8-xw-minmax-fp32-sse2.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gemm_xw_minmax_fp32_ukernel_1x4c8__sse2(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
__m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
__m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
__m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
__m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
w = (const int32_t*) w + 4;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
a0 += 8;
const __m128i vxb0 = _mm_load_si128((const __m128i*) w);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
const __m128i vxb1 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 8));
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
const __m128i vxb2 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 16));
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
const __m128i vxb3 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 24));
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
w = (const void*) ((const int16_t*) w + 32);
k += 8 * sizeof(int8_t);
}
const __m128i vacc0x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x0, vacc0x2), _mm_unpackhi_epi32(vacc0x0, vacc0x2));
const __m128i vacc0x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x1, vacc0x3), _mm_unpackhi_epi32(vacc0x1, vacc0x3));
__m128i vacc0x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x02, vacc0x13), _mm_unpackhi_epi32(vacc0x02, vacc0x13));
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
vacc00x0123 = _mm_max_epi16(vacc00x0123, voutput_min);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_cvtsi128_si32(vout);
}
nc = 0;
}
} while (nc != 0);
}
| file_length: 3,885 | avg_line_length: 31.383333 | max_line_length: 119 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x4c8-xw-minmax-fp32-sse41.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gemm_xw_minmax_fp32_ukernel_1x4c8__sse41(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
__m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
__m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
__m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
__m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
w = (const int32_t*) w + 4;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vxb0 = _mm_load_si128((const __m128i*) w);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
const __m128i vxb1 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 8));
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
const __m128i vxb2 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 16));
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
const __m128i vxb3 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 24));
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
w = (const void*) ((const int16_t*) w + 32);
k += 8 * sizeof(int8_t);
}
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| file_length: 3,631 | avg_line_length: 29.521008 | max_line_length: 108 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x4c8-xw-minmax-fp32-ssse3.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <tmmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gemm_xw_minmax_fp32_ukernel_1x4c8__ssse3(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
__m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
__m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
__m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
__m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
w = (const int32_t*) w + 4;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
a0 += 8;
const __m128i vxb0 = _mm_load_si128((const __m128i*) w);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
const __m128i vxb1 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 8));
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
const __m128i vxb2 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 16));
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
const __m128i vxb3 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 24));
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
w = (const void*) ((const int16_t*) w + 32);
k += 8 * sizeof(int8_t);
}
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
vacc00x0123 = _mm_max_epi16(vacc00x0123, voutput_min);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_cvtsi128_si32(vout);
}
nc = 0;
}
} while (nc != 0);
}
| file_length: 3,713 | avg_line_length: 29.95 | max_line_length: 108 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x4c8-xw-minmax-fp32-xop.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gemm_xw_minmax_fp32_ukernel_1x4c8__xop(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
__m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
__m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
__m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
__m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
w = (const int32_t*) w + 4;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vxb0 = _mm_load_si128((const __m128i*) w);
vacc0x0 = _mm_maddd_epi16(vxa0, vxb0, vacc0x0);
const __m128i vxb1 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 8));
vacc0x1 = _mm_maddd_epi16(vxa0, vxb1, vacc0x1);
const __m128i vxb2 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 16));
vacc0x2 = _mm_maddd_epi16(vxa0, vxb2, vacc0x2);
const __m128i vxb3 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 24));
vacc0x3 = _mm_maddd_epi16(vxa0, vxb3, vacc0x3);
w = (const void*) ((const int16_t*) w + 32);
k += 8 * sizeof(int8_t);
}
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
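    // SSE4.1/XOP path clamps against output_min after packing, directly in the int8 domain.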
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
file_length: 3,626 | avg_line_length: 28.487805 | max_line_length: 108 | extension_type: c

repo: XNNPACK | file: XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x8-minmax-rndnu-neon-mlal-lane-prfm.c
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/neon-mlal-lane.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/gemm.h>
#include <xnnpack/prefetch.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_1x8__neon_mlal_lane_prfm(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const int8x8_t va0 = vld1_s8(a0); a0 += 8;
const int16x8_t vxa0 = vmovl_s8(va0);
const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
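      // Prefetch weights 448 bytes ahead to hide memory latency on upcoming iterations.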
xnn_prefetch_to_l1((const int8_t*) w + 448);
const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
const int8x8_t vb01234567c7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c7 = vmovl_s8(vb01234567c7);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
k -= 8 * sizeof(int8_t);
}
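    // Remainder: handle the final 1..7 k values with the same lane-wise multiplies, guarded by nested size checks.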
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int16x8_t vxa0 = vmovl_s8(va0);
const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
if (k >= 2 * sizeof(int8_t)) {
const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
if (k >= 4 * sizeof(int8_t)) {
const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
if (k >= 6 * sizeof(int8_t)) {
const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
if (k > 6 * sizeof(int8_t)) {
const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
}
}
}
}
}
}
}
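    // Requantization (rndnu): saturating pre-shift, Q31 doubling multiply-high, then a rounding post-shift.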
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
    const int8x8_t voutput_min = vld1_dup_s8(&params->rndnu_neon.output_min);
    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
    const int8x8_t voutput_max = vld1_dup_s8(&params->rndnu_neon.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
file_length: 9,506 | avg_line_length: 43.013889 | max_line_length: 112 | extension_type: c

repo: XNNPACK | file: XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x8-minmax-rndnu-neon-mlal-lane.c
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/neon-mlal-lane.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/gemm.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_1x8__neon_mlal_lane(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
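    // Main loop: widen 8 A bytes to int16 once, then multiply-accumulate each lane
    // against a row of 8 weights with vmlal_lane_s16.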
while (k >= 8 * sizeof(int8_t)) {
const int8x8_t va0 = vld1_s8(a0); a0 += 8;
const int16x8_t vxa0 = vmovl_s8(va0);
const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
const int8x8_t vb01234567c7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c7 = vmovl_s8(vb01234567c7);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int16x8_t vxa0 = vmovl_s8(va0);
const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
if (k >= 2 * sizeof(int8_t)) {
const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
if (k >= 4 * sizeof(int8_t)) {
const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
if (k >= 6 * sizeof(int8_t)) {
const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
if (k > 6 * sizeof(int8_t)) {
const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
}
}
}
}
}
}
}
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
    const int8x8_t voutput_min = vld1_dup_s8(&params->rndnu_neon.output_min);
    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
    const int8x8_t voutput_max = vld1_dup_s8(&params->rndnu_neon.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
file_length: 9,420 | avg_line_length: 43.023364 | max_line_length: 112 | extension_type: c

repo: XNNPACK | file: XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x8-minmax-rndnu-neon-mull-addw-dup.c
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/neon-mull-addw-dup.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/gemm.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_1x8__neon_mull_addw_dup(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
size_t k = kc;
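    // Main loop: broadcast each A byte with vdup_lane_s8, multiply by 8 weights with vmull_s8,
    // and accumulate the widened int16 products with vaddw_s16.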
while (k >= 8 * sizeof(int8_t)) {
const int8x8_t va0 = vld1_s8(a0); a0 += 8;
const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x01234567c0 = vmull_s8(vb01234567c0, vdup_lane_s8(va0, 0));
vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c0));
vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c0));
const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x01234567c1 = vmull_s8(vb01234567c1, vdup_lane_s8(va0, 1));
vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c1));
vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c1));
const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x01234567c2 = vmull_s8(vb01234567c2, vdup_lane_s8(va0, 2));
vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c2));
vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c2));
const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x01234567c3 = vmull_s8(vb01234567c3, vdup_lane_s8(va0, 3));
vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c3));
vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c3));
const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x01234567c4 = vmull_s8(vb01234567c4, vdup_lane_s8(va0, 4));
vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c4));
vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c4));
const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x01234567c5 = vmull_s8(vb01234567c5, vdup_lane_s8(va0, 5));
vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c5));
vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c5));
const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x01234567c6 = vmull_s8(vb01234567c6, vdup_lane_s8(va0, 6));
vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c6));
vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c6));
const int8x8_t vb01234567c7 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x01234567c7 = vmull_s8(vb01234567c7, vdup_lane_s8(va0, 7));
vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c7));
vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c7));
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x01234567c0 = vmull_s8(vb01234567c0, vdup_lane_s8(va0, 0));
vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c0));
vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c0));
if (k >= 2 * sizeof(int8_t)) {
const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x01234567c1 = vmull_s8(vb01234567c1, vdup_lane_s8(va0, 1));
vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c1));
vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c1));
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x01234567c2 = vmull_s8(vb01234567c2, vdup_lane_s8(va0, 2));
vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c2));
vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c2));
if (k >= 4 * sizeof(int8_t)) {
const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x01234567c3 = vmull_s8(vb01234567c3, vdup_lane_s8(va0, 3));
vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c3));
vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c3));
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x01234567c4 = vmull_s8(vb01234567c4, vdup_lane_s8(va0, 4));
vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c4));
vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c4));
if (k >= 6 * sizeof(int8_t)) {
const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x01234567c5 = vmull_s8(vb01234567c5, vdup_lane_s8(va0, 5));
vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c5));
vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c5));
if (k > 6 * sizeof(int8_t)) {
const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x01234567c6 = vmull_s8(vb01234567c6, vdup_lane_s8(va0, 6));
vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c6));
vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c6));
}
}
}
}
}
}
}
    // Post-accumulation: requantize the int32 sums and convert them to clamped int8 outputs.
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
    const int8x8_t voutput_min = vld1_dup_s8(&params->rndnu_neon.output_min);
    const int8x8_t voutput_max = vld1_dup_s8(&params->rndnu_neon.output_max);
vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
      // Main case where the 8 columns fit in the destination.
vst1_s8(c0 + 0, vout0x01234567);
// Advance to the next 8 columns.
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
// Final case where not all of the 8 columns fit in the destination.
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
file_length: 9,365 | avg_line_length: 43.6 | max_line_length: 130 | extension_type: c

repo: XNNPACK | file: XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x8c16-minmax-rndnu-neon-mlal.c
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c16-neon-mlal.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c16__neon_mlal(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 16 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x0 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x1 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x2 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x3 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x4 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x5 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x6 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x7 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    // KC loop: process 16 k values per iteration (kc is rounded up to a multiple of 16).
size_t k = kc;
while (k != 0) {
const int8x16_t va0 = vld1q_s8(a0); a0 += 16;
const int8x16_t vb0 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
const int8x16_t vb1 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
const int8x16_t vb2 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
const int8x16_t vb3 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
const int8x16_t vb4 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
const int8x16_t vb5 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
const int8x16_t vb6 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
const int8x16_t vb7 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
int16x8_t vprod0x0 = vmull_s8(vget_low_s8(vb0), vget_low_s8(va0));
vprod0x0 = vmlal_s8(vprod0x0, vget_high_s8(vb0), vget_high_s8(va0));
vacc0x0 = vpadalq_s16(vacc0x0, vprod0x0);
int16x8_t vprod0x1 = vmull_s8(vget_low_s8(vb1), vget_low_s8(va0));
vprod0x1 = vmlal_s8(vprod0x1, vget_high_s8(vb1), vget_high_s8(va0));
vacc0x1 = vpadalq_s16(vacc0x1, vprod0x1);
int16x8_t vprod0x2 = vmull_s8(vget_low_s8(vb2), vget_low_s8(va0));
vprod0x2 = vmlal_s8(vprod0x2, vget_high_s8(vb2), vget_high_s8(va0));
vacc0x2 = vpadalq_s16(vacc0x2, vprod0x2);
int16x8_t vprod0x3 = vmull_s8(vget_low_s8(vb3), vget_low_s8(va0));
vprod0x3 = vmlal_s8(vprod0x3, vget_high_s8(vb3), vget_high_s8(va0));
vacc0x3 = vpadalq_s16(vacc0x3, vprod0x3);
int16x8_t vprod0x4 = vmull_s8(vget_low_s8(vb4), vget_low_s8(va0));
vprod0x4 = vmlal_s8(vprod0x4, vget_high_s8(vb4), vget_high_s8(va0));
vacc0x4 = vpadalq_s16(vacc0x4, vprod0x4);
int16x8_t vprod0x5 = vmull_s8(vget_low_s8(vb5), vget_low_s8(va0));
vprod0x5 = vmlal_s8(vprod0x5, vget_high_s8(vb5), vget_high_s8(va0));
vacc0x5 = vpadalq_s16(vacc0x5, vprod0x5);
int16x8_t vprod0x6 = vmull_s8(vget_low_s8(vb6), vget_low_s8(va0));
vprod0x6 = vmlal_s8(vprod0x6, vget_high_s8(vb6), vget_high_s8(va0));
vacc0x6 = vpadalq_s16(vacc0x6, vprod0x6);
int16x8_t vprod0x7 = vmull_s8(vget_low_s8(vb7), vget_low_s8(va0));
vprod0x7 = vmlal_s8(vprod0x7, vget_high_s8(vb7), vget_high_s8(va0));
vacc0x7 = vpadalq_s16(vacc0x7, vprod0x7);
k -= 16 * sizeof(int8_t);
}
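    // Reduce the eight per-column int32x4 accumulators: pairwise additions collapse each vector's
    // four lanes into a single sum per output column.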
#if XNN_ARCH_ARM64
const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1);
const int32x4_t vsum0x23 = vpaddq_s32(vacc0x2, vacc0x3);
const int32x4_t vsum0x45 = vpaddq_s32(vacc0x4, vacc0x5);
const int32x4_t vsum0x67 = vpaddq_s32(vacc0x6, vacc0x7);
int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23);
int32x4_t vacc0x4567 = vpaddq_s32(vsum0x45, vsum0x67);
#else
const int32x2_t vpsum0x0 = vadd_s32(vget_low_s32(vacc0x0), vget_high_s32(vacc0x0));
const int32x2_t vpsum0x1 = vadd_s32(vget_low_s32(vacc0x1), vget_high_s32(vacc0x1));
const int32x2_t vpsum0x2 = vadd_s32(vget_low_s32(vacc0x2), vget_high_s32(vacc0x2));
const int32x2_t vpsum0x3 = vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3));
const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1);
const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3);
    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
const int32x2_t vpsum0x4 = vadd_s32(vget_low_s32(vacc0x4), vget_high_s32(vacc0x4));
const int32x2_t vpsum0x5 = vadd_s32(vget_low_s32(vacc0x5), vget_high_s32(vacc0x5));
const int32x2_t vpsum0x6 = vadd_s32(vget_low_s32(vacc0x6), vget_high_s32(vacc0x6));
const int32x2_t vpsum0x7 = vadd_s32(vget_low_s32(vacc0x7), vget_high_s32(vacc0x7));
const int32x2_t vsum0x45 = vpadd_s32(vpsum0x4, vpsum0x5);
const int32x2_t vsum0x67 = vpadd_s32(vpsum0x6, vpsum0x7);
    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
#endif
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
    const int8x8_t voutput_min = vld1_dup_s8(&params->rndnu_neon.output_min);
    const int8x8_t voutput_max = vld1_dup_s8(&params->rndnu_neon.output_max);
vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
// Final case where not all of the 8 columns fit in the destination.
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
file_length: 8,033 | avg_line_length: 45.172414 | max_line_length: 130 | extension_type: c

repo: XNNPACK | file: XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x8c2-minmax-fp32-neon-mlal-dup.c
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c2-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_dup(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
size_t k = kc;
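    // Double-pumped main loop over 16 k values: each 2-byte (c2) group of A is broadcast and multiplied
    // against 8-byte weight vectors via vmull_s8 + vmlal_s8, then accumulated pairwise into int32 with vpadalq_s16.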
while (k >= 16 * sizeof(int8_t)) {
const int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
const int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 0));
const int8x8_t va0c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 0));
int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int8x8_t va0c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 1));
const int8x8_t va0c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 1));
int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int8x8_t va0c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 2));
const int8x8_t va0c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 2));
int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int8x8_t va0c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 3));
const int8x8_t va0c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 3));
int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
k -= 16 * sizeof(int8_t);
}
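    // Up to 8 k values can remain after the double-pumped loop; process them with single-width multiplies.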
if (k >= 8 * sizeof(int8_t)) {
const int8x8_t va0 = vld1_s8(a0); a0 += 8;
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int8x8_t va0c3 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 3));
const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
}
}
}
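    // FP32 requantization: scale in float, add a magic bias so the low mantissa bits hold the rounded
    // integer, then subtract (magic_bias - output_zero_point) in the integer domain.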
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
    const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
    const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
    vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
    vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
    const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
vacc0x0123 = vqsubq_s32(vacc0x0123, vmagic_bias_less_output_zero_point);
vacc0x4567 = vqsubq_s32(vacc0x4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
    const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neon.output_min);
    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
    const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neon.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
// Final case where not all of the 8 columns fit in the destination.
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
file_length: 11,684 | avg_line_length: 47.285124 | max_line_length: 125 | extension_type: c

repo: XNNPACK | file: XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x8c2-minmax-fp32-neon-mlal-ld1r.c
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c2-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
size_t k = kc;
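    // LD1R variant: each 2-byte (c2) group of A is loaded pre-broadcast with vld1_dup_s16 instead of vdup_lane.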
while (k >= 16 * sizeof(int8_t)) {
const int16x4_t va00x0 = vld1_dup_s16((const void*)a0);
const int16x4_t va01x0 = vld1_dup_s16((const void*)(a0 + 2));
const int16x4_t va02x0 = vld1_dup_s16((const void*)(a0 + 4));
const int16x4_t va03x0 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
const int16x4_t va00x1 = vld1_dup_s16((const void*)a0);
const int16x4_t va01x1 = vld1_dup_s16((const void*)(a0 + 2));
const int16x4_t va02x1 = vld1_dup_s16((const void*)(a0 + 4));
const int16x4_t va03x1 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0);
const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1);
int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int8x8_t va0c1x0 = vreinterpret_s8_s16(va01x0);
const int8x8_t va0c1x1 = vreinterpret_s8_s16(va01x1);
int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int8x8_t va0c2x0 = vreinterpret_s8_s16(va02x0);
const int8x8_t va0c2x1 = vreinterpret_s8_s16(va02x1);
int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int8x8_t va0c3x0 = vreinterpret_s8_s16(va03x0);
const int8x8_t va0c3x1 = vreinterpret_s8_s16(va03x1);
int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
k -= 16 * sizeof(int8_t);
}
if (k >= 8 * sizeof(int8_t)) {
const int16x4_t va00 = vld1_dup_s16((const void*)a0);
const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
}
}
}
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
    const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
    const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
    vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
    vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
    const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
vacc0x0123 = vqsubq_s32(vacc0x0123, vmagic_bias_less_output_zero_point);
vacc0x4567 = vqsubq_s32(vacc0x4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
    const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neon.output_min);
    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
    const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neon.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
// Final case where not all of the 8 columns fit in the destination.
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
file_length: 11,895 | avg_line_length: 46.394422 | max_line_length: 125 | extension_type: c

repo: XNNPACK | file: XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x8c2-minmax-fp32-neon-mlal-ld2r.c
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c2-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
size_t k = kc;
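    // Main loop: 2x unrolled over KC. vld2_dup_s16 broadcasts two 2-byte column
    // pairs per load; products are formed with MULL+MLAL and pairwise-accumulated
    // (VPADAL) into the 32-bit accumulators.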
while (k >= 16 * sizeof(int8_t)) {
const int16x4x2_t va00x0 = vld2_dup_s16((const void*)a0);
const int16x4x2_t va01x0 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
const int16x4x2_t va00x1 = vld2_dup_s16((const void*)a0);
const int16x4x2_t va01x1 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0.val[0]);
const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1.val[0]);
int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int8x8_t va0c1x0 = vreinterpret_s8_s16(va00x0.val[1]);
const int8x8_t va0c1x1 = vreinterpret_s8_s16(va00x1.val[1]);
int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int8x8_t va0c2x0 = vreinterpret_s8_s16(va01x0.val[0]);
const int8x8_t va0c2x1 = vreinterpret_s8_s16(va01x1.val[0]);
int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int8x8_t va0c3x0 = vreinterpret_s8_s16(va01x0.val[1]);
const int8x8_t va0c3x1 = vreinterpret_s8_s16(va01x1.val[1]);
int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
k -= 16 * sizeof(int8_t);
}
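    // If 8 or more KC bytes remain, process a single 8-byte block using MULL only.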
if (k >= 8 * sizeof(int8_t)) {
const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
k -= 8 * sizeof(int8_t);
}
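    // Final 2, 4, or 6 KC bytes (kc was rounded up to a multiple of 2).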
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
}
}
}
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
    const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
    const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
    vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
    vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
    const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
vacc0x0123 = vqsubq_s32(vacc0x0123, vmagic_bias_less_output_zero_point);
vacc0x4567 = vqsubq_s32(vacc0x4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
    const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neon.output_min);
    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
    const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neon.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
// Final case where not all of the 8 columns fit in the destination.
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 11,587 | 46.297959 | 125 | c |
| XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x8c2-minmax-fp32-neon-mlal-ld4r.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c2-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld4r(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
size_t k = kc;
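    // Like the LD2R variant, but a single vld4_dup_s16 broadcasts all four 2-byte
    // column pairs of an 8-byte KC block in one structure load.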
while (k >= 16 * sizeof(int8_t)) {
const int16x4x4_t va0x0 = vld4_dup_s16((const void*)a0); a0 += 8;
const int16x4x4_t va0x1 = vld4_dup_s16((const void*)a0); a0 += 8;
const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0x0 = vreinterpret_s8_s16(va0x0.val[0]);
const int8x8_t va0c0x1 = vreinterpret_s8_s16(va0x1.val[0]);
int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int8x8_t va0c1x0 = vreinterpret_s8_s16(va0x0.val[1]);
const int8x8_t va0c1x1 = vreinterpret_s8_s16(va0x1.val[1]);
int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int8x8_t va0c2x0 = vreinterpret_s8_s16(va0x0.val[2]);
const int8x8_t va0c2x1 = vreinterpret_s8_s16(va0x1.val[2]);
int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int8x8_t va0c3x0 = vreinterpret_s8_s16(va0x0.val[3]);
const int8x8_t va0c3x1 = vreinterpret_s8_s16(va0x1.val[3]);
int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
k -= 16 * sizeof(int8_t);
}
if (k >= 8 * sizeof(int8_t)) {
const int16x4x4_t va0 = vld4_dup_s16((const void*)a0); a0 += 8;
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(va0.val[0]);
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int8x8_t va0c1 = vreinterpret_s8_s16(va0.val[1]);
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int8x8_t va0c2 = vreinterpret_s8_s16(va0.val[2]);
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int8x8_t va0c3 = vreinterpret_s8_s16(va0.val[3]);
const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
}
}
}
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
    const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
    const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
    vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
    vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
    const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
vacc0x0123 = vqsubq_s32(vacc0x0123, vmagic_bias_less_output_zero_point);
vacc0x4567 = vqsubq_s32(vacc0x4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
    const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neon.output_min);
    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
    const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neon.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
// Final case where not all of the 8 columns fit in the destination.
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 11,364 | 45.96281 | 125 | c |
| XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x8c2-minmax-fp32-neonv8-mlal-dup.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c2-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_dup(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
size_t k = kc;
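    // DUP variant: one plain vld1_s8 load per 8 KC bytes; each 2-byte column pair
    // is then splat across the vector with vdup_lane_s16.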
while (k >= 16 * sizeof(int8_t)) {
const int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
const int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 0));
const int8x8_t va0c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 0));
int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int8x8_t va0c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 1));
const int8x8_t va0c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 1));
int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int8x8_t va0c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 2));
const int8x8_t va0c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 2));
int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int8x8_t va0c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 3));
const int8x8_t va0c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 3));
int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
k -= 16 * sizeof(int8_t);
}
if (k >= 8 * sizeof(int8_t)) {
const int8x8_t va0 = vld1_s8(a0); a0 += 8;
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int8x8_t va0c3 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 3));
const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
}
}
}
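    // ARMv8 requantization: VCVTN rounds the scaled FP32 values to nearest (ties
    // to even); the output zero point is added after the saturating narrow to int16.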
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
    const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
    vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
    vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
    const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neonv8.output_min);
    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
    const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neonv8.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
// Final case where not all of the 8 columns fit in the destination.
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 11,542 | 46.896266 | 103 | c |
| XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x8c2-minmax-fp32-neonv8-mlal-ld1r.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c2-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
size_t k = kc;
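    // LD1R variant: each 2-byte column pair is broadcast with its own vld1_dup_s16
    // load (four loads per 8 KC bytes).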
while (k >= 16 * sizeof(int8_t)) {
const int16x4_t va00x0 = vld1_dup_s16((const void*)a0);
const int16x4_t va01x0 = vld1_dup_s16((const void*)(a0 + 2));
const int16x4_t va02x0 = vld1_dup_s16((const void*)(a0 + 4));
const int16x4_t va03x0 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
const int16x4_t va00x1 = vld1_dup_s16((const void*)a0);
const int16x4_t va01x1 = vld1_dup_s16((const void*)(a0 + 2));
const int16x4_t va02x1 = vld1_dup_s16((const void*)(a0 + 4));
const int16x4_t va03x1 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0);
const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1);
int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int8x8_t va0c1x0 = vreinterpret_s8_s16(va01x0);
const int8x8_t va0c1x1 = vreinterpret_s8_s16(va01x1);
int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int8x8_t va0c2x0 = vreinterpret_s8_s16(va02x0);
const int8x8_t va0c2x1 = vreinterpret_s8_s16(va02x1);
int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int8x8_t va0c3x0 = vreinterpret_s8_s16(va03x0);
const int8x8_t va0c3x1 = vreinterpret_s8_s16(va03x1);
int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
k -= 16 * sizeof(int8_t);
}
if (k >= 8 * sizeof(int8_t)) {
const int16x4_t va00 = vld1_dup_s16((const void*)a0);
const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
}
}
}
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
    const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
    vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
    vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
    const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neonv8.output_min);
    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
    const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neonv8.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
// Final case where not all of the 8 columns fit in the destination.
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 11,753 | 46.016 | 103 | c |
| XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x8c2-minmax-fp32-neonv8-mlal-ld2r.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c2-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
size_t k = kc;
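    // Same LD2R inner loop as the plain NEON kernel; only the requantization
    // below differs (VCVTN instead of the magic-bias trick).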
while (k >= 16 * sizeof(int8_t)) {
const int16x4x2_t va00x0 = vld2_dup_s16((const void*)a0);
const int16x4x2_t va01x0 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
const int16x4x2_t va00x1 = vld2_dup_s16((const void*)a0);
const int16x4x2_t va01x1 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0.val[0]);
const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1.val[0]);
int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int8x8_t va0c1x0 = vreinterpret_s8_s16(va00x0.val[1]);
const int8x8_t va0c1x1 = vreinterpret_s8_s16(va00x1.val[1]);
int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int8x8_t va0c2x0 = vreinterpret_s8_s16(va01x0.val[0]);
const int8x8_t va0c2x1 = vreinterpret_s8_s16(va01x1.val[0]);
int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int8x8_t va0c3x0 = vreinterpret_s8_s16(va01x0.val[1]);
const int8x8_t va0c3x1 = vreinterpret_s8_s16(va01x1.val[1]);
int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
k -= 16 * sizeof(int8_t);
}
if (k >= 8 * sizeof(int8_t)) {
const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
}
}
}
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
    const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
    vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
    vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
    const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neonv8.output_min);
    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
    const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neonv8.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
// Final case where not all of the 8 columns fit in the destination.
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 11,445 | 45.909836 | 103 | c |
| XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x8c2-minmax-fp32-neonv8-mlal-ld4r.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c2-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld4r(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
size_t k = kc;
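    // LD4R variant for ARMv8: a single vld4_dup_s16 broadcasts all four 2-byte
    // column pairs per 8 KC bytes.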
while (k >= 16 * sizeof(int8_t)) {
const int16x4x4_t va0x0 = vld4_dup_s16((const void*)a0); a0 += 8;
const int16x4x4_t va0x1 = vld4_dup_s16((const void*)a0); a0 += 8;
const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0x0 = vreinterpret_s8_s16(va0x0.val[0]);
const int8x8_t va0c0x1 = vreinterpret_s8_s16(va0x1.val[0]);
int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int8x8_t va0c1x0 = vreinterpret_s8_s16(va0x0.val[1]);
const int8x8_t va0c1x1 = vreinterpret_s8_s16(va0x1.val[1]);
int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int8x8_t va0c2x0 = vreinterpret_s8_s16(va0x0.val[2]);
const int8x8_t va0c2x1 = vreinterpret_s8_s16(va0x1.val[2]);
int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int8x8_t va0c3x0 = vreinterpret_s8_s16(va0x0.val[3]);
const int8x8_t va0c3x1 = vreinterpret_s8_s16(va0x1.val[3]);
int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
k -= 16 * sizeof(int8_t);
}
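    // Handle 8 bytes at a time using MUL.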
if (k >= 8 * sizeof(int8_t)) {
const int16x4x4_t va0 = vld4_dup_s16((const void*)a0); a0 += 8;
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(va0.val[0]);
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int8x8_t va0c1 = vreinterpret_s8_s16(va0.val[1]);
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int8x8_t va0c2 = vreinterpret_s8_s16(va0.val[2]);
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int8x8_t va0c3 = vreinterpret_s8_s16(va0.val[3]);
const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
k -= 8 * sizeof(int8_t);
}
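    // Handle the remaining 2, 4, or 6 bytes of K using MUL.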
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
}
}
}
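    // Requantization: scale in fp32, then round back to int32 with the ARMv8 round-to-nearest conversion (fp32 path).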
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
    const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
    const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neonv8.output_min);
vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
    const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neonv8.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
// Final case where not all of the 8 columns fit in the destination.
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 11,222 | 45.568465 | 103 | c |

XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x8c2-minmax-rndnu-neon-mlal-dup.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c2-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_dup(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
size_t k = kc;
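    // 2x partial unrolled loop to load 16 bytes at a time using MLA.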
while (k >= 16 * sizeof(int8_t)) {
const int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
const int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 0));
const int8x8_t va0c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 0));
int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int8x8_t va0c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 1));
const int8x8_t va0c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 1));
int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int8x8_t va0c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 2));
const int8x8_t va0c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 2));
int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int8x8_t va0c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 3));
const int8x8_t va0c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 3));
int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
k -= 16 * sizeof(int8_t);
}
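    // Handle 8 bytes at a time using MUL.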
if (k >= 8 * sizeof(int8_t)) {
const int8x8_t va0 = vld1_s8(a0); a0 += 8;
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int8x8_t va0c3 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 3));
const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
k -= 8 * sizeof(int8_t);
}
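    // Handle the remaining 2, 4, or 6 bytes of K using MUL.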
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
}
}
}
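    // Requantization: saturating pre-shift, doubling multiply-high, and rounding post-shift (rndnu).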
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
    const int8x8_t voutput_min = vld1_dup_s8(&params->rndnu_neon.output_min);
vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
    const int8x8_t voutput_max = vld1_dup_s8(&params->rndnu_neon.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
// Final case where not all of the 8 columns fit in the destination.
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 11,725 | 47.255144 | 103 | c |

XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x8c2-minmax-rndnu-neon-mlal-ld1r.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c2-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
size_t k = kc;
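    // 2x partial unrolled loop to load 16 bytes at a time using MLA.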
while (k >= 16 * sizeof(int8_t)) {
const int16x4_t va00x0 = vld1_dup_s16((const void*)a0);
const int16x4_t va01x0 = vld1_dup_s16((const void*)(a0 + 2));
const int16x4_t va02x0 = vld1_dup_s16((const void*)(a0 + 4));
const int16x4_t va03x0 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
const int16x4_t va00x1 = vld1_dup_s16((const void*)a0);
const int16x4_t va01x1 = vld1_dup_s16((const void*)(a0 + 2));
const int16x4_t va02x1 = vld1_dup_s16((const void*)(a0 + 4));
const int16x4_t va03x1 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0);
const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1);
int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int8x8_t va0c1x0 = vreinterpret_s8_s16(va01x0);
const int8x8_t va0c1x1 = vreinterpret_s8_s16(va01x1);
int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int8x8_t va0c2x0 = vreinterpret_s8_s16(va02x0);
const int8x8_t va0c2x1 = vreinterpret_s8_s16(va02x1);
int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int8x8_t va0c3x0 = vreinterpret_s8_s16(va03x0);
const int8x8_t va0c3x1 = vreinterpret_s8_s16(va03x1);
int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
k -= 16 * sizeof(int8_t);
}
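    // Handle 8 bytes at a time using MUL.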
if (k >= 8 * sizeof(int8_t)) {
const int16x4_t va00 = vld1_dup_s16((const void*)a0);
const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
k -= 8 * sizeof(int8_t);
}
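    // Handle the remaining 2, 4, or 6 bytes of K using MUL.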
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
}
}
}
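    // Requantization: saturating pre-shift, doubling multiply-high, and rounding post-shift (rndnu).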
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
    const int8x8_t voutput_min = vld1_dup_s8(&params->rndnu_neon.output_min);
vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
    const int8x8_t voutput_max = vld1_dup_s8(&params->rndnu_neon.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
// Final case where not all of the 8 columns fit in the destination.
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 11,936 | 46.369048 | 103 | c |

XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x8c2-minmax-rndnu-neon-mlal-ld2r.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c2-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
size_t k = kc;
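    // 2x partial unrolled loop to load 16 bytes at a time using MLA.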
while (k >= 16 * sizeof(int8_t)) {
const int16x4x2_t va00x0 = vld2_dup_s16((const void*)a0);
const int16x4x2_t va01x0 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
const int16x4x2_t va00x1 = vld2_dup_s16((const void*)a0);
const int16x4x2_t va01x1 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0.val[0]);
const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1.val[0]);
int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int8x8_t va0c1x0 = vreinterpret_s8_s16(va00x0.val[1]);
const int8x8_t va0c1x1 = vreinterpret_s8_s16(va00x1.val[1]);
int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int8x8_t va0c2x0 = vreinterpret_s8_s16(va01x0.val[0]);
const int8x8_t va0c2x1 = vreinterpret_s8_s16(va01x1.val[0]);
int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int8x8_t va0c3x0 = vreinterpret_s8_s16(va01x0.val[1]);
const int8x8_t va0c3x1 = vreinterpret_s8_s16(va01x1.val[1]);
int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
k -= 16 * sizeof(int8_t);
}
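    // Handle 8 bytes at a time using MUL.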
if (k >= 8 * sizeof(int8_t)) {
const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
k -= 8 * sizeof(int8_t);
}
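    // Handle the remaining 2, 4, or 6 bytes of K using MUL.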
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
}
}
}
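    // Requantization: saturating pre-shift, doubling multiply-high, and rounding post-shift (rndnu).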
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
    const int8x8_t voutput_min = vld1_dup_s8(&params->rndnu_neon.output_min);
vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
    const int8x8_t voutput_max = vld1_dup_s8(&params->rndnu_neon.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
// Final case where not all of the 8 columns fit in the destination.
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 11,628 | 46.272358 | 103 | c |

XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x8c2-minmax-rndnu-neon-mlal-ld4r.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c2-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld4r(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
size_t k = kc;
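    // 2x partial unrolled loop to load 16 bytes at a time using MLA.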
while (k >= 16 * sizeof(int8_t)) {
const int16x4x4_t va0x0 = vld4_dup_s16((const void*)a0); a0 += 8;
const int16x4x4_t va0x1 = vld4_dup_s16((const void*)a0); a0 += 8;
const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0x0 = vreinterpret_s8_s16(va0x0.val[0]);
const int8x8_t va0c0x1 = vreinterpret_s8_s16(va0x1.val[0]);
int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int8x8_t va0c1x0 = vreinterpret_s8_s16(va0x0.val[1]);
const int8x8_t va0c1x1 = vreinterpret_s8_s16(va0x1.val[1]);
int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int8x8_t va0c2x0 = vreinterpret_s8_s16(va0x0.val[2]);
const int8x8_t va0c2x1 = vreinterpret_s8_s16(va0x1.val[2]);
int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int8x8_t va0c3x0 = vreinterpret_s8_s16(va0x0.val[3]);
const int8x8_t va0c3x1 = vreinterpret_s8_s16(va0x1.val[3]);
int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
k -= 16 * sizeof(int8_t);
}
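    // Handle 8 bytes at a time using MUL.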
if (k >= 8 * sizeof(int8_t)) {
const int16x4x4_t va0 = vld4_dup_s16((const void*)a0); a0 += 8;
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(va0.val[0]);
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int8x8_t va0c1 = vreinterpret_s8_s16(va0.val[1]);
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int8x8_t va0c2 = vreinterpret_s8_s16(va0.val[2]);
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int8x8_t va0c3 = vreinterpret_s8_s16(va0.val[3]);
const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
k -= 8 * sizeof(int8_t);
}
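    // Handle the remaining 2, 4, or 6 bytes of K using MUL.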
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
}
}
}
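    // Requantization: saturating pre-shift, doubling multiply-high, and rounding post-shift (rndnu).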
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
    const int8x8_t voutput_min = vld1_dup_s8(&params->rndnu_neon.output_min);
vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
    const int8x8_t voutput_max = vld1_dup_s8(&params->rndnu_neon.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
// Final case where not all of the 8 columns fit in the destination.
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 11,405 | 45.938272 | 103 | c |

XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x8c2-minmax-rndnu-neon-mull-dup.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c2-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_dup(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
size_t k = kc;
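    // Process 8 bytes of K at a time using MUL.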
while (k >= 8 * sizeof(int8_t)) {
const int8x8_t va0 = vld1_s8(a0); a0 += 8;
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int8x8_t va0c3 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 3));
const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
k -= 8 * sizeof(int8_t);
}
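    // Handle the remaining 2, 4, or 6 bytes of K using MUL.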
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
}
}
}
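    // Requantization: saturating pre-shift, doubling multiply-high, and rounding post-shift (rndnu).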
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
    const int8x8_t voutput_min = vld1_dup_s8(&params->rndnu_neon.output_min);
vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
    const int8x8_t voutput_max = vld1_dup_s8(&params->rndnu_neon.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
// Final case where not all of the 8 columns fit in the destination.
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,607 | 40.347826 | 103 | c |

XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x8c2-minmax-rndnu-neon-mull-ld1r.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c2-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
size_t k = kc;
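    // Main loop: 8 k-values per iteration. Each vld1_dup_s16 broadcasts one
    // pair of int8 activations across the vector (the "ld1r" load strategy).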
while (k >= 8 * sizeof(int8_t)) {
const int16x4_t va00 = vld1_dup_s16((const void*)a0);
const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
k -= 8 * sizeof(int8_t);
}
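    // Remainder: kc is rounded up to a multiple of 2, so 2, 4 or 6 k-values
    // may be left over.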
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
}
}
}
const int32x4_t vright_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
const int8x8_t voutput_min = vld1_dup_s8(¶ms->rndnu_neon.output_min);
vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
const int8x8_t voutput_max = vld1_dup_s8(¶ms->rndnu_neon.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
// Final case where not all of the 8 columns fit in the destination.
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,674 | 40.042781 | 103 | c |
XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x8c2-minmax-rndnu-neon-mull-ld2r.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c2-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
size_t k = kc;
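    // Main loop: each vld2_dup_s16 de-interleaves and broadcasts two pairs of
    // int8 activations per load (the "ld2r" load strategy).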
while (k >= 8 * sizeof(int8_t)) {
const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
}
}
}
const int32x4_t vright_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
const int8x8_t voutput_min = vld1_dup_s8(¶ms->rndnu_neon.output_min);
vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
const int8x8_t voutput_max = vld1_dup_s8(¶ms->rndnu_neon.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
// Final case where not all of the 8 columns fit in the destination.
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,574 | 39.945946 | 103 | c |
XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x8c2-minmax-rndnu-neon-mull-ld4r.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c2-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld4r(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
size_t k = kc;
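    // Main loop: a single vld4_dup_s16 broadcasts all four activation pairs
    // for the iteration (the "ld4r" load strategy).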
while (k >= 8 * sizeof(int8_t)) {
const int16x4x4_t va0 = vld4_dup_s16((const void*)a0); a0 += 8;
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(va0.val[0]);
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int8x8_t va0c1 = vreinterpret_s8_s16(va0.val[1]);
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int8x8_t va0c2 = vreinterpret_s8_s16(va0.val[2]);
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int8x8_t va0c3 = vreinterpret_s8_s16(va0.val[3]);
const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
}
}
}
const int32x4_t vright_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
const int8x8_t voutput_min = vld1_dup_s8(¶ms->rndnu_neon.output_min);
vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
const int8x8_t voutput_max = vld1_dup_s8(¶ms->rndnu_neon.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
// Final case where not all of the 8 columns fit in the destination.
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,501 | 39.771739 | 103 | c |
XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x8c2s4-minmax-fp32-neon-mlal.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c2-neon-mull-shuffle.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2s4__neon_mlal(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const int8_t* a0 = a;
int8_t* c0 = c;
kc = round_up_po2(kc, 8 * sizeof(int8_t));
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
size_t k = kc;
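    // Main loop: 2x unrolled MULL+MLAL with the c2s4 shuffle; vext_s8 rotates
    // the activation vectors by 2 bytes between the four multiply groups.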
while (k >= 16 * sizeof(int8_t)) {
int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0x0);
const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0x0);
const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
va0x0 = vext_s8(va0x0, va0x0, 2);
va0x1 = vext_s8(va0x1, va0x1, 2);
int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0x0);
const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0x0);
const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
va0x0 = vext_s8(va0x0, va0x0, 2);
va0x1 = vext_s8(va0x1, va0x1, 2);
int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0x0);
const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0x0);
const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
va0x0 = vext_s8(va0x0, va0x0, 2);
va0x1 = vext_s8(va0x1, va0x1, 2);
int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0x0);
const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0x0);
const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
k -= 16 * sizeof(int8_t);
}
if (k != 0) {
int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0x0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0x0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
va0x0 = vext_s8(va0x0, va0x0, 2);
int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0x0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0x0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
va0x0 = vext_s8(va0x0, va0x0, 2);
int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0x0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0x0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
va0x0 = vext_s8(va0x0, va0x0, 2);
int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0x0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0x0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
}
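    // Requantize via fp32: scale the int32 accumulators, then round back with
    // the magic-bias trick (add a large constant so the float mantissa holds
    // the rounded integer, then subtract the bias and output zero point).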
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
const float32x4_t vscale = vld1q_dup_f32(¶ms->fp32_neon.scale);
vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->fp32_neon.magic_bias);
vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(¶ms->fp32_neon.magic_bias_less_output_zero_point);
vacc0x0123 = vqsubq_s32(vacc0x0123, vmagic_bias_less_output_zero_point);
vacc0x4567 = vqsubq_s32(vacc0x4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
const int8x8_t voutput_min = vld1_dup_s8(¶ms->fp32_neon.output_min);
vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
const int8x8_t voutput_max = vld1_dup_s8(¶ms->fp32_neon.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
// Final case where not all of the 8 columns fit in the destination.
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 8,146 | 40.779487 | 125 | c |
XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x8c2s4-minmax-fp32-neonv8-mlal.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c2-neon-mull-shuffle.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2s4__neonv8_mlal(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const int8_t* a0 = a;
int8_t* c0 = c;
kc = round_up_po2(kc, 8 * sizeof(int8_t));
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
size_t k = kc;
while (k >= 16 * sizeof(int8_t)) {
int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0x0);
const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0x0);
const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
va0x0 = vext_s8(va0x0, va0x0, 2);
va0x1 = vext_s8(va0x1, va0x1, 2);
int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0x0);
const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0x0);
const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
va0x0 = vext_s8(va0x0, va0x0, 2);
va0x1 = vext_s8(va0x1, va0x1, 2);
int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0x0);
const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0x0);
const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
va0x0 = vext_s8(va0x0, va0x0, 2);
va0x1 = vext_s8(va0x1, va0x1, 2);
int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0x0);
const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0x0);
const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
k -= 16 * sizeof(int8_t);
}
if (k != 0) {
int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0x0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0x0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
va0x0 = vext_s8(va0x0, va0x0, 2);
int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0x0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0x0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
va0x0 = vext_s8(va0x0, va0x0, 2);
int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0x0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0x0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
va0x0 = vext_s8(va0x0, va0x0, 2);
int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0x0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0x0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
}
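    // Requantize via fp32: on NEON v8 the scaled accumulators are rounded back
    // to int32 with vcvtnq_s32_f32 (round to nearest, ties to even).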
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
const float32x4_t vscale = vld1q_dup_f32(¶ms->fp32_neonv8.scale);
vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->fp32_neonv8.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
const int8x8_t voutput_min = vld1_dup_s8(¶ms->fp32_neonv8.output_min);
vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
const int8x8_t voutput_max = vld1_dup_s8(¶ms->fp32_neonv8.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
// Final case where not all of the 8 columns fit in the destination.
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 8,008 | 40.283505 | 95 | c |
XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x8c2s4-minmax-rndnu-neon-mlal.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c2-neon-mull-shuffle.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2s4__neon_mlal(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const int8_t* a0 = a;
int8_t* c0 = c;
kc = round_up_po2(kc, 8 * sizeof(int8_t));
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
size_t k = kc;
while (k >= 16 * sizeof(int8_t)) {
int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0x0);
const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0x0);
const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
va0x0 = vext_s8(va0x0, va0x0, 2);
va0x1 = vext_s8(va0x1, va0x1, 2);
int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0x0);
const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0x0);
const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
va0x0 = vext_s8(va0x0, va0x0, 2);
va0x1 = vext_s8(va0x1, va0x1, 2);
int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0x0);
const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0x0);
const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
va0x0 = vext_s8(va0x0, va0x0, 2);
va0x1 = vext_s8(va0x1, va0x1, 2);
int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0x0);
const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0x0);
const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
k -= 16 * sizeof(int8_t);
}
if (k != 0) {
int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0x0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0x0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
va0x0 = vext_s8(va0x0, va0x0, 2);
int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0x0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0x0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
va0x0 = vext_s8(va0x0, va0x0, 2);
int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0x0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0x0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
va0x0 = vext_s8(va0x0, va0x0, 2);
int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0x0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0x0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
}
const int32x4_t vright_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
const int8x8_t voutput_min = vld1_dup_s8(¶ms->rndnu_neon.output_min);
vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
const int8x8_t voutput_max = vld1_dup_s8(¶ms->rndnu_neon.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
// Final case where not all of the 8 columns fit in the destination.
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 8,191 | 40.795918 | 94 | c |
XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x8c2s4-minmax-rndnu-neon-mull.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c2-neon-mull-shuffle.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2s4__neon_mull(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const int8_t* a0 = a;
int8_t* c0 = c;
kc = round_up_po2(kc, 8 * sizeof(int8_t));
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
size_t k = kc;
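    // Single-pass MULL loop (no MLAL unrolling): 8 k-values per iteration,
    // using the same c2s4 vext shuffle between multiply groups.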
do {
int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0x0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0x0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
va0x0 = vext_s8(va0x0, va0x0, 2);
int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0x0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0x0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
va0x0 = vext_s8(va0x0, va0x0, 2);
int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0x0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0x0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
va0x0 = vext_s8(va0x0, va0x0, 2);
int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0x0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0x0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
k -= 8 * sizeof(int8_t);
} while (k != 0);
const int32x4_t vright_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
const int8x8_t voutput_min = vld1_dup_s8(¶ms->rndnu_neon.output_min);
vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
const int8x8_t voutput_max = vld1_dup_s8(¶ms->rndnu_neon.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
// Final case where not all of the 8 columns fit in the destination.
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,163 | 35.111888 | 94 | c |
XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x8c4-minmax-fp32-neon-mlal-dup.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c4-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4__neon_mlal_dup(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 4 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
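    // The two bias values per accumulator are zero-extended into alternating
    // lanes (vmovl_u32) so the later pairwise adds fold each column's two
    // partial sums onto its bias.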
int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
size_t k = kc;
while (k >= 16 * sizeof(int8_t)) {
const int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
const int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0x0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0x0), 0));
const int8x8_t va0c0x1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0x1), 0));
int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0c0x0);
const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0c0x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0c0x0);
const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0c0x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0c0x0);
const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0c0x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0c0x0);
const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0c0x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int8x8_t va0c1x0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0x0), 1));
const int8x8_t va0c1x1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0x1), 1));
int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0c1x0);
const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0c1x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0c1x0);
const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0c1x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0c1x0);
const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0c1x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0c1x0);
const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0c1x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
k -= 16 * sizeof(int8_t);
}
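    // 8-wide tail of the main loop: a single MULL pass without the second
    // MLAL input.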
if (k >= 8 * sizeof(int8_t)) {
const int8x8_t va0 = vld1_s8(a0); a0 += 8;
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
}
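    // Reduce the paired accumulators: pairwise adds collapse the two partial
    // sums per output column into one int32 per column.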
#if XNN_ARCH_ARM64
int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
#else
const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
#endif
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
const float32x4_t vscale = vld1q_dup_f32(¶ms->fp32_neon.scale);
vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->fp32_neon.magic_bias);
vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(¶ms->fp32_neon.magic_bias_less_output_zero_point);
vacc0x0123 = vqsubq_s32(vacc0x0123, vmagic_bias_less_output_zero_point);
vacc0x4567 = vqsubq_s32(vacc0x4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
const int8x8_t voutput_min = vld1_dup_s8(¶ms->fp32_neon.output_min);
vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
const int8x8_t voutput_max = vld1_dup_s8(¶ms->fp32_neon.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
// Final case where not all of the 8 columns fit in the destination.
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 10,981 | 46.336207 | 128 | c |
XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x8c4-minmax-fp32-neon-mlal-ld1r.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c4-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4__neon_mlal_ld1r(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 4 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
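    // The 8 int32 biases are packed ahead of the weights; each vld1_u32 +
    // vmovl_u32 pair places two biases into the even lanes of a 128-bit
    // accumulator (odd lanes start at zero), and the final pairwise
    // reduction folds the lanes back together.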
size_t k = kc;
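    // Main loop: 2x unrolled over 16 activation bytes. vmull_s8/vmlal_s8
    // keep the products in 16 bits, and vpadalq_s16 widens the pairwise
    // sums into the 32-bit accumulators.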
while (k >= 16 * sizeof(int8_t)) {
const int32x2_t va00x0 = vld1_dup_s32((const void*)a0);
const int32x2_t va01x0 = vld1_dup_s32((const void*)(a0 + 4)); a0 += 8;
const int32x2_t va00x1 = vld1_dup_s32((const void*)a0);
const int32x2_t va01x1 = vld1_dup_s32((const void*)(a0 + 4)); a0 += 8;
const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0x0 = vreinterpret_s8_s32(va00x0);
const int8x8_t va0c0x1 = vreinterpret_s8_s32(va00x1);
int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0c0x0);
const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0c0x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0c0x0);
const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0c0x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0c0x0);
const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0c0x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0c0x0);
const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0c0x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int8x8_t va0c1x0 = vreinterpret_s8_s32(va01x0);
const int8x8_t va0c1x1 = vreinterpret_s8_s32(va01x1);
int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0c1x0);
const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0c1x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0c1x0);
const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0c1x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0c1x0);
const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0c1x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0c1x0);
const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0c1x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
k -= 16 * sizeof(int8_t);
}
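    // Up to 8 remaining bytes: a single vmull_s8 pass per column pair.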
if (k >= 8 * sizeof(int8_t)) {
const int32x2_t va00 = vld1_dup_s32((const void*)a0);
const int32x2_t va01 = vld1_dup_s32((const void*)(a0 + 4)); a0 += 8;
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(va00);
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int8x8_t va0c1 = vreinterpret_s8_s32(va01);
const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
k -= 8 * sizeof(int8_t);
}
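    // Remainder: k == 4 here, since kc is rounded up to a multiple of 4;
    // the 8-byte load reads past the 4 valid bytes (permitted by
    // XNN_OOB_READS) and only lane 0 of the activations is used.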
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
}
#if XNN_ARCH_ARM64
int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
#else
const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
#endif
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
    const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
    const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
    vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
    vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
    const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
vacc0x0123 = vqsubq_s32(vacc0x0123, vmagic_bias_less_output_zero_point);
vacc0x4567 = vqsubq_s32(vacc0x4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
    const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neon.output_min);
    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
    const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neon.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
// Final case where not all of the 8 columns fit in the destination.
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 11,016 | 45.880851 | 128 | c |
XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x8c4-minmax-fp32-neon-mlal-ld2r.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c4-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4__neon_mlal_ld2r(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 4 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
size_t k = kc;
while (k >= 16 * sizeof(int8_t)) {
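      // ld2r variant: vld2_dup_s32 splits each 8-byte activation group into
      // two broadcast 4-byte halves (val[0] and val[1]) in a single
      // duplicating load.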
const int32x2x2_t va0x0 = vld2_dup_s32((const void*)a0); a0 += 8;
const int32x2x2_t va0x1 = vld2_dup_s32((const void*)a0); a0 += 8;
const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0x0 = vreinterpret_s8_s32(va0x0.val[0]);
const int8x8_t va0c0x1 = vreinterpret_s8_s32(va0x1.val[0]);
int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0c0x0);
const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0c0x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0c0x0);
const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0c0x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0c0x0);
const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0c0x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0c0x0);
const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0c0x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int8x8_t va0c1x0 = vreinterpret_s8_s32(va0x0.val[1]);
const int8x8_t va0c1x1 = vreinterpret_s8_s32(va0x1.val[1]);
int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0c1x0);
const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0c1x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0c1x0);
const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0c1x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0c1x0);
const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0c1x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0c1x0);
const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0c1x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
k -= 16 * sizeof(int8_t);
}
if (k >= 8 * sizeof(int8_t)) {
const int32x2x2_t va0 = vld2_dup_s32((const void*)a0); a0 += 8;
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(va0.val[0]);
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int8x8_t va0c1 = vreinterpret_s8_s32(va0.val[1]);
const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
}
#if XNN_ARCH_ARM64
int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
#else
const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
#endif
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
    const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
    const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
    vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
    vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
    const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
vacc0x0123 = vqsubq_s32(vacc0x0123, vmagic_bias_less_output_zero_point);
vacc0x4567 = vqsubq_s32(vacc0x4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
    const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neon.output_min);
    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
    const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neon.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
// Final case where not all of the 8 columns fit in the destination.
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 10,853 | 45.784483 | 128 | c |
XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x8c4-minmax-fp32-neondot.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c4-neondot.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4__neondot(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 4 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
// Loop over groups of 8 columns.
do {
// Initialize accumulators with bias. 8 bias values are loaded from the
// weight matrix, at the start of the group of 8 columns.
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
    // Inner accumulation loop along the K dimension for this group of 8 columns.
size_t k = kc;
// 2x partial unrolled loop to load 8 bytes at a time.
while (k >= 8 * sizeof(int8_t)) {
// Load a 1x8 block of activations.
const int8x8_t va0x01234567 = vld1_s8(a0); a0 += 8;
      // Load an 8x8 block of weights.
const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb4567x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb4567x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
// Multiply-accumulate: 1x8 * 8x8 --> 1x8.
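      // Each vdotq_lane_s32 dots one 4-byte activation group (one s32 lane
      // of va0x01234567) against four 4-byte weight groups at once.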
vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0);
vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0);
vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb4567x0123, va0x01234567, 1);
vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb4567x4567, va0x01234567, 1);
k -= 8 * sizeof(int8_t);
}
// Handle up to 4 final positions of `k`
if XNN_UNLIKELY(k != 0) {
// Load a 1x4 block of activations.
const int8x8_t va0x01234567 = vld1_s8(a0); a0 += 4;
// Load a 4x8 block of weights.
const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
// Multiply-accumulate: 1x4 * 4x8 --> 1x8.
vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0);
vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0);
}
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
    const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
    vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
    vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
#if XNN_ARCH_ARM64
const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
    const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neonv8.output_min);
    const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neonv8.output_max);
vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
      // Main case where the 8 columns fit in the destination.
vst1_s8(c0 + 0, vout0x01234567);
// Advance to the next 8 columns.
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
// Final case where not all of the 8 columns fit in the destination.
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
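
A scalar model of the SDOT step used by the kernel above; this is an illustrative sketch, not generated code, and sdot_lane_ref is a name invented here.

#include <stdint.h>

// Scalar semantics of acc = vdotq_lane_s32(acc, b, a, lane): each of the
// four int32 output lanes accumulates a 4-way int8 dot product between one
// 4-byte weight group and the selected 4-byte activation group.
static void sdot_lane_ref(int32_t acc[4], const int8_t b[16], const int8_t a[8], int lane) {
  for (int n = 0; n < 4; n++) {
    for (int i = 0; i < 4; i++) {
      acc[n] += (int32_t) b[4 * n + i] * (int32_t) a[4 * lane + i];
    }
  }
}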
| 4,975 | 34.29078 | 132 | c |
XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x8c4-minmax-fp32-neonv8-mlal-dup.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c4-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4__neonv8_mlal_dup(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 4 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
size_t k = kc;
while (k >= 16 * sizeof(int8_t)) {
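      // dup variant: each 4-byte activation group is broadcast with
      // vdup_lane_s32 after a plain 8-byte load, instead of the duplicating
      // loads used by the ld1r/ld2r variants.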
const int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
const int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0x0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0x0), 0));
const int8x8_t va0c0x1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0x1), 0));
int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0c0x0);
const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0c0x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0c0x0);
const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0c0x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0c0x0);
const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0c0x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0c0x0);
const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0c0x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int8x8_t va0c1x0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0x0), 1));
const int8x8_t va0c1x1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0x1), 1));
int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0c1x0);
const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0c1x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0c1x0);
const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0c1x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0c1x0);
const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0c1x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0c1x0);
const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0c1x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
k -= 16 * sizeof(int8_t);
}
if (k >= 8 * sizeof(int8_t)) {
const int8x8_t va0 = vld1_s8(a0); a0 += 8;
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
}
#if XNN_ARCH_ARM64
int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
#else
const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
#endif
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
    const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
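    // NEON v8 path: native round-to-nearest (vcvtnq_s32_f32) replaces the
    // magic-bias rounding used by the plain NEON fp32 kernels.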
vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
    const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neonv8.output_min);
    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
    const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neonv8.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
// Final case where not all of the 8 columns fit in the destination.
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 10,839 | 45.926407 | 128 | c |
XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x8c4-minmax-fp32-neonv8-mlal-ld1r.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c4-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4__neonv8_mlal_ld1r(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 4 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
size_t k = kc;
while (k >= 16 * sizeof(int8_t)) {
const int32x2_t va00x0 = vld1_dup_s32((const void*)a0);
const int32x2_t va01x0 = vld1_dup_s32((const void*)(a0 + 4)); a0 += 8;
const int32x2_t va00x1 = vld1_dup_s32((const void*)a0);
const int32x2_t va01x1 = vld1_dup_s32((const void*)(a0 + 4)); a0 += 8;
const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0x0 = vreinterpret_s8_s32(va00x0);
const int8x8_t va0c0x1 = vreinterpret_s8_s32(va00x1);
int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0c0x0);
const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0c0x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0c0x0);
const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0c0x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0c0x0);
const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0c0x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0c0x0);
const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0c0x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int8x8_t va0c1x0 = vreinterpret_s8_s32(va01x0);
const int8x8_t va0c1x1 = vreinterpret_s8_s32(va01x1);
int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0c1x0);
const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0c1x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0c1x0);
const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0c1x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0c1x0);
const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0c1x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0c1x0);
const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0c1x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
k -= 16 * sizeof(int8_t);
}
if (k >= 8 * sizeof(int8_t)) {
const int32x2_t va00 = vld1_dup_s32((const void*)a0);
const int32x2_t va01 = vld1_dup_s32((const void*)(a0 + 4)); a0 += 8;
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(va00);
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int8x8_t va0c1 = vreinterpret_s8_s32(va01);
const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
}
#if XNN_ARCH_ARM64
int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
#else
const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
#endif
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
    const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
    vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
    vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
    const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neonv8.output_min);
    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
    const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neonv8.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
// Final case where not all of the 8 columns fit in the destination.
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 10,874 | 45.474359 | 128 | c |
XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x8c4-minmax-fp32-neonv8-mlal-ld2r.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c4-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4__neonv8_mlal_ld2r(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 4 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
size_t k = kc;
while (k >= 16 * sizeof(int8_t)) {
const int32x2x2_t va0x0 = vld2_dup_s32((const void*)a0); a0 += 8;
const int32x2x2_t va0x1 = vld2_dup_s32((const void*)a0); a0 += 8;
const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0x0 = vreinterpret_s8_s32(va0x0.val[0]);
const int8x8_t va0c0x1 = vreinterpret_s8_s32(va0x1.val[0]);
int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0c0x0);
const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0c0x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0c0x0);
const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0c0x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0c0x0);
const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0c0x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0c0x0);
const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0c0x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int8x8_t va0c1x0 = vreinterpret_s8_s32(va0x0.val[1]);
const int8x8_t va0c1x1 = vreinterpret_s8_s32(va0x1.val[1]);
int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0c1x0);
const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0c1x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0c1x0);
const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0c1x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0c1x0);
const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0c1x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0c1x0);
const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0c1x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
k -= 16 * sizeof(int8_t);
}
if (k >= 8 * sizeof(int8_t)) {
const int32x2x2_t va0 = vld2_dup_s32((const void*)a0); a0 += 8;
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(va0.val[0]);
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int8x8_t va0c1 = vreinterpret_s8_s32(va0.val[1]);
const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
}
#if XNN_ARCH_ARM64
int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
#else
const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
#endif
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
    const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
    vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
    vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
    const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neonv8.output_min);
    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
    const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neonv8.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
// Final case where not all of the 8 columns fit in the destination.
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 10,711
| 45.372294
| 128
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x8c4-minmax-rndnu-neon-mlal-dup.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c4-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4__neon_mlal_dup(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 4 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
size_t k = kc;
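    // Same MLAL accumulation pattern as the fp32 kernels above; this rndnu
    // kernel differs in its requantization stage, which uses saturating
    // fixed-point (round-to-nearest-up) arithmetic instead of fp32 scaling.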
while (k >= 16 * sizeof(int8_t)) {
const int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
const int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0x0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0x0), 0));
const int8x8_t va0c0x1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0x1), 0));
int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0c0x0);
const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0c0x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0c0x0);
const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0c0x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0c0x0);
const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0c0x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0c0x0);
const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0c0x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int8x8_t va0c1x0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0x0), 1));
const int8x8_t va0c1x1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0x1), 1));
int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0c1x0);
const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0c1x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0c1x0);
const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0c1x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0c1x0);
const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0c1x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0c1x0);
const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0c1x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
k -= 16 * sizeof(int8_t);
}
if (k >= 8 * sizeof(int8_t)) {
const int8x8_t va0 = vld1_s8(a0); a0 += 8;
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
}
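    // Collapse the two-column accumulators into per-column sums: adjacent
    // lanes are added pairwise (a single VPADDQ on AArch64, VPADD plus
    // VCOMBINE elsewhere).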
#if XNN_ARCH_ARM64
int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
#else
const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
#endif
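    // Requantize with the rndnu scheme: saturating shift (VQSHL), saturating
    // doubling-high multiply (VQDMULH), rounding shift (VRSHL). The shift
    // counts are signed, so negative values shift right.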
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
    const int8x8_t voutput_min = vld1_dup_s8(&params->rndnu_neon.output_min);
    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
    const int8x8_t voutput_max = vld1_dup_s8(&params->rndnu_neon.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
// Final case where not all of the 8 columns fit in the destination.
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x8c4-minmax-rndnu-neon-mlal-ld1r.c
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c4-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4__neon_mlal_ld1r(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 4 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
size_t k = kc;
while (k >= 16 * sizeof(int8_t)) {
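      // LD1R activation loads: each 4-byte group is loaded once and broadcast
      // to both halves of a 64-bit register, replacing the load-then-VDUP_LANE
      // sequence of the "dup" variant.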
const int32x2_t va00x0 = vld1_dup_s32((const void*)a0);
const int32x2_t va01x0 = vld1_dup_s32((const void*)(a0 + 4)); a0 += 8;
const int32x2_t va00x1 = vld1_dup_s32((const void*)a0);
const int32x2_t va01x1 = vld1_dup_s32((const void*)(a0 + 4)); a0 += 8;
const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0x0 = vreinterpret_s8_s32(va00x0);
const int8x8_t va0c0x1 = vreinterpret_s8_s32(va00x1);
int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0c0x0);
const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0c0x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0c0x0);
const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0c0x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0c0x0);
const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0c0x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0c0x0);
const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0c0x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int8x8_t va0c1x0 = vreinterpret_s8_s32(va01x0);
const int8x8_t va0c1x1 = vreinterpret_s8_s32(va01x1);
int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0c1x0);
const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0c1x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0c1x0);
const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0c1x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0c1x0);
const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0c1x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0c1x0);
const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0c1x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
k -= 16 * sizeof(int8_t);
}
if (k >= 8 * sizeof(int8_t)) {
const int32x2_t va00 = vld1_dup_s32((const void*)a0);
const int32x2_t va01 = vld1_dup_s32((const void*)(a0 + 4)); a0 += 8;
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(va00);
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int8x8_t va0c1 = vreinterpret_s8_s32(va01);
const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
}
#if XNN_ARCH_ARM64
int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
#else
const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
#endif
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
    const int8x8_t voutput_min = vld1_dup_s8(&params->rndnu_neon.output_min);
    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
    const int8x8_t voutput_max = vld1_dup_s8(&params->rndnu_neon.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
// Final case where not all of the 8 columns fit in the destination.
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x8c4-minmax-rndnu-neon-mlal-ld2r.c
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c4-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4__neon_mlal_ld2r(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 4 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
size_t k = kc;
while (k >= 16 * sizeof(int8_t)) {
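      // LD2R activation loads: one de-interleaving VLD2R broadcasts two
      // adjacent 4-byte groups at once (val[0] holds bytes 0-3, val[1]
      // bytes 4-7).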
const int32x2x2_t va0x0 = vld2_dup_s32((const void*)a0); a0 += 8;
const int32x2x2_t va0x1 = vld2_dup_s32((const void*)a0); a0 += 8;
const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0x0 = vreinterpret_s8_s32(va0x0.val[0]);
const int8x8_t va0c0x1 = vreinterpret_s8_s32(va0x1.val[0]);
int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0c0x0);
const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0c0x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0c0x0);
const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0c0x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0c0x0);
const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0c0x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0c0x0);
const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0c0x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int8x8_t va0c1x0 = vreinterpret_s8_s32(va0x0.val[1]);
const int8x8_t va0c1x1 = vreinterpret_s8_s32(va0x1.val[1]);
int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0c1x0);
const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0c1x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0c1x0);
const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0c1x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0c1x0);
const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0c1x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0c1x0);
const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0c1x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
k -= 16 * sizeof(int8_t);
}
if (k >= 8 * sizeof(int8_t)) {
const int32x2x2_t va0 = vld2_dup_s32((const void*)a0); a0 += 8;
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(va0.val[0]);
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int8x8_t va0c1 = vreinterpret_s8_s32(va0.val[1]);
const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
}
#if XNN_ARCH_ARM64
int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
#else
const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
#endif
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
    const int8x8_t voutput_min = vld1_dup_s8(&params->rndnu_neon.output_min);
    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
    const int8x8_t voutput_max = vld1_dup_s8(&params->rndnu_neon.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
// Final case where not all of the 8 columns fit in the destination.
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x8c4-minmax-rndnu-neon-mull-dup.c
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c4-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4__neon_mull_dup(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 4 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
size_t k = kc;
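    // MULL-only variant: no 2x unrolling, so every 8-byte group of K issues a
    // single widening multiply per accumulator before the pairwise accumulate.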
while (k >= 8 * sizeof(int8_t)) {
const int8x8_t va0 = vld1_s8(a0); a0 += 8;
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
}
#if XNN_ARCH_ARM64
int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
#else
const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
#endif
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
    const int8x8_t voutput_min = vld1_dup_s8(&params->rndnu_neon.output_min);
    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
    const int8x8_t voutput_max = vld1_dup_s8(&params->rndnu_neon.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
// Final case where not all of the 8 columns fit in the destination.
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x8c4-minmax-rndnu-neon-mull-ld1r.c
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c4-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4__neon_mull_ld1r(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 4 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
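      // The two dup-loads broadcast the activation groups: va00 (bytes 0-3)
      // feeds the c0 weight columns, va01 (bytes 4-7) feeds the c1 columns.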
const int32x2_t va00 = vld1_dup_s32((const void*)a0);
const int32x2_t va01 = vld1_dup_s32((const void*)(a0 + 4)); a0 += 8;
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(va00);
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int8x8_t va0c1 = vreinterpret_s8_s32(va01);
const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
}
#if XNN_ARCH_ARM64
int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
#else
const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
#endif
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
    const int8x8_t voutput_min = vld1_dup_s8(&params->rndnu_neon.output_min);
    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
    const int8x8_t voutput_max = vld1_dup_s8(&params->rndnu_neon.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
// Final case where not all of the 8 columns fit in the destination.
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x8c4-minmax-rndnu-neon-mull-ld2r.c
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c4-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4__neon_mull_ld2r(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 4 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
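      // A single VLD2R supplies both broadcast activation groups:
      // va0.val[0] pairs with the c0 weights, va0.val[1] with the c1 weights.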
const int32x2x2_t va0 = vld2_dup_s32((const void*)a0); a0 += 8;
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(va0.val[0]);
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int8x8_t va0c1 = vreinterpret_s8_s32(va0.val[1]);
const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
}
#if XNN_ARCH_ARM64
int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
#else
const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
#endif
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
    const int8x8_t voutput_min = vld1_dup_s8(&params->rndnu_neon.output_min);
    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
    const int8x8_t voutput_max = vld1_dup_s8(&params->rndnu_neon.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
// Final case where not all of the 8 columns fit in the destination.
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x8c4-minmax-rndnu-neondot.c
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c4-neondot.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4__neondot(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 4 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
// Loop over groups of 8 columns.
do {
// Initialize accumulators with bias. 8 bias values are loaded from the
// weight matrix, at the start of the group of 8 columns.
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
// Inner accumulation loop along the 8 columns.
size_t k = kc;
// 2x partial unrolled loop to load 8 bytes at a time.
while (k >= 8 * sizeof(int8_t)) {
// Load a 1x8 block of activations.
const int8x8_t va0x01234567 = vld1_s8(a0); a0 += 8;
      // Load an 8x8 block of weights.
const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb4567x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb4567x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
// Multiply-accumulate: 1x8 * 8x8 --> 1x8.
vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0);
vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0);
vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb4567x0123, va0x01234567, 1);
vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb4567x4567, va0x01234567, 1);
k -= 8 * sizeof(int8_t);
}
// Handle up to 4 final positions of `k`
if XNN_UNLIKELY(k != 0) {
// Load a 1x4 block of activations.
const int8x8_t va0x01234567 = vld1_s8(a0); a0 += 4;
// Load a 4x8 block of weights.
const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
// Multiply-accumulate: 1x4 * 4x8 --> 1x8.
vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0);
vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0);
}
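    // rndnu requantization: signed saturating shift, doubling-high multiply,
    // then rounding shift; negative shift counts shift right.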
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
    const int8x8_t voutput_min = vld1_dup_s8(&params->rndnu_neon.output_min);
    const int8x8_t voutput_max = vld1_dup_s8(&params->rndnu_neon.output_max);
vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
      // Main case where the 8 columns fit in the destination.
vst1_s8(c0 + 0, vout0x01234567);
// Advance to the next 8 columns.
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
// Final case where not all of the 8 columns fit in the destination.
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x8c4s2-minmax-fp32-neon-mlal.c
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c4-neon-mull-shuffle.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const int8_t* a0 = a;
int8_t* c0 = c;
kc = round_up_po2(kc, 8 * sizeof(int8_t));
do {
int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
size_t k = kc;
while (k >= 16 * sizeof(int8_t)) {
int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
const int8x8_t vb01c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb23c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb45c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb67c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb01c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb23c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb45c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb67c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
const int8x8_t vb01c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
const int8x8_t vb23c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
const int8x8_t vb45c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
const int8x8_t vb67c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
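      // c4s2 shuffle: rotate the activation registers by 4 bytes so the
      // second 4-byte group lines up with the c1 weight columns.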
va0x0 = vext_s8(va0x0, va0x0, 4);
va0x1 = vext_s8(va0x1, va0x1, 4);
int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
const int8x8_t vb01c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
const int8x8_t vb23c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
const int8x8_t vb45c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
const int8x8_t vb67c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
k -= 16 * sizeof(int8_t);
}
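    // Remainder: one final group of 8 bytes (kc is rounded up to a multiple of 8), using MULL only.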
if (k != 0) {
int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
const int8x8_t vb01c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb23c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb45c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb67c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb01c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb23c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb45c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb67c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
va0x0 = vext_s8(va0x0, va0x0, 4);
int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
}
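    // Pairwise-add the 2-column partial sums into one int32 accumulator per output column.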
#if XNN_ARCH_ARM64
int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
#else
const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
#endif
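    // fp32 requantization: scale in floating point, then round and convert back to int32 via the magic-bias trick.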
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
    const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
    const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
    const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
vacc0x0123 = vqsubq_s32(vacc0x0123, vmagic_bias_less_output_zero_point);
vacc0x4567 = vqsubq_s32(vacc0x4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
    const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neon.output_min);
vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
    const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neon.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
// Final case where not all of the 8 columns fit in the destination.
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 8,529 | 41.019704 | 125 | c |
| XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x8c4s2-minmax-fp32-neonv8-mlal.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c4-neon-mull-shuffle.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const int8_t* a0 = a;
int8_t* c0 = c;
kc = round_up_po2(kc, 8 * sizeof(int8_t));
do {
int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
size_t k = kc;
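    // Main loop: process 16 bytes of K at a time using paired MULL+MLAL.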
while (k >= 16 * sizeof(int8_t)) {
int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
const int8x8_t vb01c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb23c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb45c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb67c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb01c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb23c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb45c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb67c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
const int8x8_t vb01c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
const int8x8_t vb23c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
const int8x8_t vb45c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
const int8x8_t vb67c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
va0x0 = vext_s8(va0x0, va0x0, 4);
va0x1 = vext_s8(va0x1, va0x1, 4);
int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
const int8x8_t vb01c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
const int8x8_t vb23c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
const int8x8_t vb45c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
const int8x8_t vb67c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
k -= 16 * sizeof(int8_t);
}
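    // Remainder: one final group of 8 bytes (kc is rounded up to a multiple of 8), using MULL only.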
if (k != 0) {
int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
const int8x8_t vb01c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb23c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb45c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb67c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb01c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb23c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb45c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb67c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
va0x0 = vext_s8(va0x0, va0x0, 4);
int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
}
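    // Pairwise-add the 2-column partial sums into one int32 accumulator per output column.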
#if XNN_ARCH_ARM64
int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
#else
const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
#endif
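    // fp32 requantization: scale in floating point, then round to nearest-even with the ARMv8 VCVTN instruction.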
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
    const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
    const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neonv8.output_min);
vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
    const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neonv8.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
// Final case where not all of the 8 columns fit in the destination.
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 8,387 | 40.524752 | 99 | c |
| XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x8c4s2-minmax-rndnu-neon-mlal.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c4-neon-mull-shuffle.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const int8_t* a0 = a;
int8_t* c0 = c;
kc = round_up_po2(kc, 8 * sizeof(int8_t));
do {
int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
size_t k = kc;
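    // Main loop: process 16 bytes of K at a time using paired MULL+MLAL.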
while (k >= 16 * sizeof(int8_t)) {
int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
const int8x8_t vb01c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb23c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb45c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb67c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb01c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb23c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb45c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb67c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
const int8x8_t vb01c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
const int8x8_t vb23c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
const int8x8_t vb45c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
const int8x8_t vb67c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
va0x0 = vext_s8(va0x0, va0x0, 4);
va0x1 = vext_s8(va0x1, va0x1, 4);
int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
const int8x8_t vb01c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
const int8x8_t vb23c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
const int8x8_t vb45c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
const int8x8_t vb67c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
k -= 16 * sizeof(int8_t);
}
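    // Remainder: one final group of 8 bytes (kc is rounded up to a multiple of 8), using MULL only.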
if (k != 0) {
int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
const int8x8_t vb01c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb23c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb45c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb67c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb01c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb23c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb45c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb67c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
va0x0 = vext_s8(va0x0, va0x0, 4);
int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
}
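    // Pairwise-add the 2-column partial sums into one int32 accumulator per output column.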
#if XNN_ARCH_ARM64
int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
#else
const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
#endif
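    // rndnu requantization: saturating pre-shift, doubling multiply-high, then rounding post-shift.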
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
    const int8x8_t voutput_min = vld1_dup_s8(&params->rndnu_neon.output_min);
vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
    const int8x8_t voutput_max = vld1_dup_s8(&params->rndnu_neon.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
// Final case where not all of the 8 columns fit in the destination.
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 8,570 | 41.014706 | 99 | c |
| XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x8c4s2-minmax-rndnu-neon-mull.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c4-neon-mull-shuffle.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const int8_t* a0 = a;
int8_t* c0 = c;
kc = round_up_po2(kc, 8 * sizeof(int8_t));
do {
int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
size_t k = kc;
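    // Process 8 bytes of K per iteration using MULL only (no MLAL pairing).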
do {
int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
const int8x8_t vb01c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb23c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb45c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb67c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb01c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb23c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb45c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb67c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
va0x0 = vext_s8(va0x0, va0x0, 4);
int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
k -= 8 * sizeof(int8_t);
} while (k != 0);
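    // Pairwise-add the 2-column partial sums into one int32 accumulator per output column.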
#if XNN_ARCH_ARM64
int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
#else
const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
#endif
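    // rndnu requantization: saturating pre-shift, doubling multiply-high, then rounding post-shift.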
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
    const int8x8_t voutput_min = vld1_dup_s8(&params->rndnu_neon.output_min);
vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
    const int8x8_t voutput_max = vld1_dup_s8(&params->rndnu_neon.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
// Final case where not all of the 8 columns fit in the destination.
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,862 | 36.825806 | 99 | c |