repo
stringlengths 1
152
⌀ | file
stringlengths 14
221
| code
stringlengths 501
25k
| file_length
int64 501
25k
| avg_line_length
float64 20
99.5
| max_line_length
int64 21
134
| extension_type
stringclasses 2
values |
|---|---|---|---|---|---|---|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7p7x-minmax-fp32-neon-c16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
// Multipass QU8 global average pooling: processes 7 input rows per pass,
// 16 channels per vector iteration, with fp32 ("magic bias") requantization
// on ARM NEON.
//
// Phases:
//   1. First pass: sum 7 rows per channel, add init_bias, store int32 partial
//      sums to `buffer`.
//   2. Middle passes: while more than 7 rows remain, accumulate 7 more rows
//      into `buffer`.
//   3. Final pass: sum the remaining 1..7 rows (absent rows read from `zero`),
//      scale, requantize, clamp, and store uint8 results to `output`.
//
// NOTE(review): the source extraction had garbled "&params" into the
// mojibake "¶ms" (U+00B6); restored here so the code compiles.
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__neon_c16(
    size_t rows,
    size_t channels,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    int32_t* buffer,
    uint8_t* output,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows > 7);
  assert(channels != 0);
  const uint8_t* i0 = input;
  const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
  const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
  const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
  const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
  const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
  const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
  // Advance all 7 row pointers past the rows just consumed; the channel loop
  // itself has already moved them by round_up_po2(channels, 16) bytes.
  const size_t input_increment = 7 * input_stride - round_up_po2(channels, 16) * sizeof(uint8_t);

  // First pass: 7 rows summed per channel, seeded with init_bias.
  const int32x4_t vinit_bias = vld1q_dup_s32(&params->fp32_neon.init_bias);
  int32_t* b = buffer;
  size_t c = channels;
  for (; c != 0; c = doz(c, 16)) {
    const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
    const uint8x8_t vi0x89ABCDEF = vld1_u8(i0); i0 += 8;
    const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
    const uint8x8_t vi1x89ABCDEF = vld1_u8(i1); i1 += 8;

    const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
    uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
    const uint8x8_t vi2x89ABCDEF = vld1_u8(i2); i2 += 8;
    uint16x8_t vsum89ABCDEF = vaddl_u8(vi0x89ABCDEF, vi1x89ABCDEF);

    const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
    const uint8x8_t vi3x89ABCDEF = vld1_u8(i3); i3 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi2x89ABCDEF);
    const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
    const uint8x8_t vi4x89ABCDEF = vld1_u8(i4); i4 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi3x89ABCDEF);
    const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
    const uint8x8_t vi5x89ABCDEF = vld1_u8(i5); i5 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi4x89ABCDEF);
    const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
    const uint8x8_t vi6x89ABCDEF = vld1_u8(i6); i6 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi5x89ABCDEF);
    vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi6x89ABCDEF);

    // Widen the u16 row-sums to 32 bits on top of the init bias; the
    // reinterprets are free and keep the accumulator in signed form.
    const int32x4_t vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum01234567)));
    const int32x4_t vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum01234567)));
    const int32x4_t vacc89AB = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum89ABCDEF)));
    const int32x4_t vaccCDEF = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum89ABCDEF)));

    vst1q_s32(b, vacc0123); b += 4;
    vst1q_s32(b, vacc4567); b += 4;
    vst1q_s32(b, vacc89AB); b += 4;
    vst1q_s32(b, vaccCDEF); b += 4;
  }

  // Middle passes: accumulate 7 rows at a time into the int32 buffer while
  // more than 7 rows remain for the final pass.
  for (rows -= 7; rows > 7; rows -= 7) {
    i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
    i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
    i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
    i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
    i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
    i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
    i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);

    int32_t* b = buffer;
    size_t c = channels;
    for (; c != 0; c = doz(c, 16)) {
      const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
      const uint8x8_t vi0x89ABCDEF = vld1_u8(i0); i0 += 8;
      const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
      const uint8x8_t vi1x89ABCDEF = vld1_u8(i1); i1 += 8;

      const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
      uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
      const uint8x8_t vi2x89ABCDEF = vld1_u8(i2); i2 += 8;
      uint16x8_t vsum89ABCDEF = vaddl_u8(vi0x89ABCDEF, vi1x89ABCDEF);

      const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
      const uint8x8_t vi3x89ABCDEF = vld1_u8(i3); i3 += 8;
      vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi2x89ABCDEF);
      const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
      const uint8x8_t vi4x89ABCDEF = vld1_u8(i4); i4 += 8;
      vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi3x89ABCDEF);
      const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
      const uint8x8_t vi5x89ABCDEF = vld1_u8(i5); i5 += 8;
      vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi4x89ABCDEF);
      const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
      const uint8x8_t vi6x89ABCDEF = vld1_u8(i6); i6 += 8;
      vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi5x89ABCDEF);

      // Loads of the previous partial sums are interleaved with the last
      // widening adds to hide load latency (order preserved from generator).
      int32x4_t vacc0123 = vld1q_s32(b);
      int32x4_t vacc4567 = vld1q_s32(b + 4);
      vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
      int32x4_t vacc89AB = vld1q_s32(b + 8);
      int32x4_t vaccCDEF = vld1q_s32(b + 12);
      vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi6x89ABCDEF);

      vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vsum01234567)));
      vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vsum01234567)));
      vacc89AB = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc89AB), vget_low_u16(vsum89ABCDEF)));
      vaccCDEF = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccCDEF), vget_high_u16(vsum89ABCDEF)));

      vst1q_s32(b, vacc0123); b += 4;
      vst1q_s32(b, vacc4567); b += 4;
      vst1q_s32(b, vacc89AB); b += 4;
      vst1q_s32(b, vaccCDEF); b += 4;
    }
  }

  // Final pass: 1..7 rows remain; rows beyond `rows` are redirected to the
  // all-zero `zero` buffer so they contribute nothing to the sums.
  i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
  i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
  if XNN_UNPREDICTABLE(rows < 2) {
    i1 = zero;
  }
  i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 2) {
    i2 = zero;
  }
  i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
  if XNN_UNPREDICTABLE(rows < 4) {
    i3 = zero;
  }
  i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 4) {
    i4 = zero;
  }
  i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
  if XNN_UNPREDICTABLE(rows < 6) {
    i5 = zero;
  }
  i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 6) {
    i6 = zero;
  }

  // Requantization constants (fp32 "magic bias" scheme).
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
  const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
  const uint8x16_t voutput_min = vld1q_dup_u8(&params->fp32_neon.output_min);
  const uint8x16_t voutput_max = vld1q_dup_u8(&params->fp32_neon.output_max);

  for (; channels >= 16; channels -= 16) {
    const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
    const uint8x8_t vi0x89ABCDEF = vld1_u8(i0); i0 += 8;
    const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
    const uint8x8_t vi1x89ABCDEF = vld1_u8(i1); i1 += 8;

    const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
    uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
    const uint8x8_t vi2x89ABCDEF = vld1_u8(i2); i2 += 8;
    uint16x8_t vsum89ABCDEF = vaddl_u8(vi0x89ABCDEF, vi1x89ABCDEF);

    const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
    const uint8x8_t vi3x89ABCDEF = vld1_u8(i3); i3 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi2x89ABCDEF);
    const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
    const uint8x8_t vi4x89ABCDEF = vld1_u8(i4); i4 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi3x89ABCDEF);
    const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
    const uint8x8_t vi5x89ABCDEF = vld1_u8(i5); i5 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi4x89ABCDEF);
    const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
    const uint8x8_t vi6x89ABCDEF = vld1_u8(i6); i6 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi5x89ABCDEF);

    int32x4_t vacc0123 = vld1q_s32(buffer); buffer += 4;
    int32x4_t vacc4567 = vld1q_s32(buffer); buffer += 4;
    vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
    int32x4_t vacc89AB = vld1q_s32(buffer); buffer += 4;
    int32x4_t vaccCDEF = vld1q_s32(buffer); buffer += 4;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi6x89ABCDEF);

    vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vsum01234567)));
    vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vsum01234567)));
    vacc89AB = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc89AB), vget_low_u16(vsum89ABCDEF)));
    vaccCDEF = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccCDEF), vget_high_u16(vsum89ABCDEF)));

    // Requantize: int32 -> float, scale, add magic bias (places the rounded
    // integer in the mantissa), reinterpret as int32, subtract the bias
    // offset (saturating) to recover the zero-point-adjusted value.
    float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
    float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
    float32x4_t vfpacc89AB = vcvtq_f32_s32(vacc89AB);
    float32x4_t vfpaccCDEF = vcvtq_f32_s32(vaccCDEF);

    vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
    vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
    vfpacc89AB = vmulq_f32(vfpacc89AB, vscale);
    vfpaccCDEF = vmulq_f32(vfpaccCDEF, vscale);

    vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
    vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
    vacc89AB = vreinterpretq_s32_f32(vaddq_f32(vfpacc89AB, vmagic_bias));
    vaccCDEF = vreinterpretq_s32_f32(vaddq_f32(vfpaccCDEF, vmagic_bias));

    vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
    vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
    vacc89AB = vqsubq_s32(vacc89AB, vmagic_bias_less_output_zero_point);
    vaccCDEF = vqsubq_s32(vaccCDEF, vmagic_bias_less_output_zero_point);

    #if XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
      int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
    #else  // !XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
      int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
    #endif  // !XNN_ARCH_ARM64

    #if XNN_ARCH_ARM64
      uint8x16_t vout0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc01234567), vacc89ABCDEF);
    #else  // !XNN_ARCH_ARM64
      uint8x16_t vout0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc01234567), vqmovun_s16(vacc89ABCDEF));
    #endif  // !XNN_ARCH_ARM64

    vout0123456789ABCDEF = vmaxq_u8(vout0123456789ABCDEF, voutput_min);
    vout0123456789ABCDEF = vminq_u8(vout0123456789ABCDEF, voutput_max);

    vst1q_u8(output, vout0123456789ABCDEF); output += 16;
  }

  // Remainder: process the trailing channels 8 at a time; the final partial
  // group is stored with 4/2/1-lane tail stores (XNN_OOB_READS permits the
  // over-reads on the loads).
  if XNN_UNLIKELY(channels != 0) {
    do {
      const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
      const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;

      const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
      uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);

      const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
      const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
      const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
      const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);

      int32x4_t vacc0123 = vld1q_s32(buffer); buffer += 4;
      int32x4_t vacc4567 = vld1q_s32(buffer); buffer += 4;
      vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);

      vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vsum01234567)));
      vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vsum01234567)));

      float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
      float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);

      vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
      vfpacc4567 = vmulq_f32(vfpacc4567, vscale);

      vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
      vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));

      vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
      vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);

      #if XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
      #else
        int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
      #endif

      uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
      vout01234567 = vmax_u8(vout01234567, vget_low_u8(voutput_min));
      vout01234567 = vmin_u8(vout01234567, vget_low_u8(voutput_max));

      if XNN_LIKELY(channels >= 8) {
        vst1_u8(output, vout01234567); output += 8;
        channels -= 8;
      } else {
        if (channels & 4) {
          vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
          vout01234567 = vext_u8(vout01234567, vout01234567, 4);
        }
        if (channels & 2) {
          vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
          vout01234567 = vext_u8(vout01234567, vout01234567, 2);
        }
        if (channels & 1) {
          vst1_lane_u8(output, vout01234567, 0); output += 1;
        }
        channels = 0;
      }
    } while (channels != 0);
  }
}
| 14,661
| 45.398734
| 128
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7p7x-minmax-fp32-neon-c24.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
// Multipass QU8 global average pooling: processes 7 input rows per pass,
// 24 channels per main vector iteration (remainder handled 8 at a time),
// with fp32 ("magic bias") requantization on ARM NEON.
//
// Phases:
//   1. First pass: sum 7 rows per channel, add init_bias, store int32 partial
//      sums to `buffer` (24-wide main loop, 8-wide remainder loop).
//   2. Middle passes: while more than 7 rows remain, accumulate 7 more rows
//      into `buffer`.
//   3. Final pass: sum the remaining 1..7 rows (absent rows read from `zero`),
//      scale, requantize, clamp, and store uint8 results to `output`.
//
// NOTE(review): the source extraction had garbled "&params" into the
// mojibake "¶ms" (U+00B6); restored here so the code compiles.
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__neon_c24(
    size_t rows,
    size_t channels,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    int32_t* buffer,
    uint8_t* output,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows > 7);
  assert(channels != 0);
  const uint8_t* i0 = input;
  const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
  const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
  const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
  const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
  const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
  const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
  // Channels are consumed in multiples of 8 (24-wide main loop plus 8-wide
  // remainder loop), hence the round-up to 8 rather than 24.
  const size_t input_increment = 7 * input_stride - round_up_po2(channels, 8) * sizeof(uint8_t);

  // First pass: 7 rows summed per channel, seeded with init_bias.
  const int32x4_t vinit_bias = vld1q_dup_s32(&params->fp32_neon.init_bias);
  int32_t* b = buffer;
  size_t c = channels;
  for (; c >= 24; c -= 24) {
    const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
    const uint8x8_t vi0x89ABCDEF = vld1_u8(i0); i0 += 8;
    const uint8x8_t vi0xGHIJKLMN = vld1_u8(i0); i0 += 8;
    const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
    const uint8x8_t vi1x89ABCDEF = vld1_u8(i1); i1 += 8;
    const uint8x8_t vi1xGHIJKLMN = vld1_u8(i1); i1 += 8;

    const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
    uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
    const uint8x8_t vi2x89ABCDEF = vld1_u8(i2); i2 += 8;
    uint16x8_t vsum89ABCDEF = vaddl_u8(vi0x89ABCDEF, vi1x89ABCDEF);
    const uint8x8_t vi2xGHIJKLMN = vld1_u8(i2); i2 += 8;
    uint16x8_t vsumGHIJKLMN = vaddl_u8(vi0xGHIJKLMN, vi1xGHIJKLMN);

    const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
    const uint8x8_t vi3x89ABCDEF = vld1_u8(i3); i3 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi2x89ABCDEF);
    const uint8x8_t vi3xGHIJKLMN = vld1_u8(i3); i3 += 8;
    vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi2xGHIJKLMN);
    const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
    const uint8x8_t vi4x89ABCDEF = vld1_u8(i4); i4 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi3x89ABCDEF);
    const uint8x8_t vi4xGHIJKLMN = vld1_u8(i4); i4 += 8;
    vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi3xGHIJKLMN);
    const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
    const uint8x8_t vi5x89ABCDEF = vld1_u8(i5); i5 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi4x89ABCDEF);
    const uint8x8_t vi5xGHIJKLMN = vld1_u8(i5); i5 += 8;
    vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi4xGHIJKLMN);
    const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
    const uint8x8_t vi6x89ABCDEF = vld1_u8(i6); i6 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi5x89ABCDEF);
    const uint8x8_t vi6xGHIJKLMN = vld1_u8(i6); i6 += 8;
    vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi5xGHIJKLMN);
    vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi6x89ABCDEF);
    vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi6xGHIJKLMN);

    // Widen the u16 row-sums to 32 bits on top of the init bias.
    const int32x4_t vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum01234567)));
    const int32x4_t vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum01234567)));
    const int32x4_t vacc89AB = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum89ABCDEF)));
    const int32x4_t vaccCDEF = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum89ABCDEF)));
    const int32x4_t vaccGHIJ = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsumGHIJKLMN)));
    const int32x4_t vaccKLMN = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsumGHIJKLMN)));

    vst1q_s32(b, vacc0123); b += 4;
    vst1q_s32(b, vacc4567); b += 4;
    vst1q_s32(b, vacc89AB); b += 4;
    vst1q_s32(b, vaccCDEF); b += 4;
    vst1q_s32(b, vaccGHIJ); b += 4;
    vst1q_s32(b, vaccKLMN); b += 4;
  }
  // First-pass remainder: leftover channels in groups of 8.
  if XNN_UNLIKELY(c != 0) {
    do {
      const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
      const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;

      const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
      uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);

      const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
      const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
      const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
      const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
      vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);

      const int32x4_t vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum01234567)));
      const int32x4_t vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum01234567)));

      vst1q_s32(b, vacc0123); b += 4;
      vst1q_s32(b, vacc4567); b += 4;

      c = doz(c, 8);
    } while (c != 0);
  }

  // Middle passes: accumulate 7 rows at a time into the int32 buffer while
  // more than 7 rows remain for the final pass.
  for (rows -= 7; rows > 7; rows -= 7) {
    i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
    i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
    i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
    i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
    i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
    i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
    i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);

    int32_t* b = buffer;
    size_t c = channels;
    for (; c >= 24; c -= 24) {
      const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
      const uint8x8_t vi0x89ABCDEF = vld1_u8(i0); i0 += 8;
      const uint8x8_t vi0xGHIJKLMN = vld1_u8(i0); i0 += 8;
      const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
      const uint8x8_t vi1x89ABCDEF = vld1_u8(i1); i1 += 8;
      const uint8x8_t vi1xGHIJKLMN = vld1_u8(i1); i1 += 8;

      const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
      uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
      const uint8x8_t vi2x89ABCDEF = vld1_u8(i2); i2 += 8;
      uint16x8_t vsum89ABCDEF = vaddl_u8(vi0x89ABCDEF, vi1x89ABCDEF);
      const uint8x8_t vi2xGHIJKLMN = vld1_u8(i2); i2 += 8;
      uint16x8_t vsumGHIJKLMN = vaddl_u8(vi0xGHIJKLMN, vi1xGHIJKLMN);

      const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
      const uint8x8_t vi3x89ABCDEF = vld1_u8(i3); i3 += 8;
      vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi2x89ABCDEF);
      const uint8x8_t vi3xGHIJKLMN = vld1_u8(i3); i3 += 8;
      vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi2xGHIJKLMN);
      const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
      const uint8x8_t vi4x89ABCDEF = vld1_u8(i4); i4 += 8;
      vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi3x89ABCDEF);
      const uint8x8_t vi4xGHIJKLMN = vld1_u8(i4); i4 += 8;
      vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi3xGHIJKLMN);
      const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
      const uint8x8_t vi5x89ABCDEF = vld1_u8(i5); i5 += 8;
      vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi4x89ABCDEF);
      const uint8x8_t vi5xGHIJKLMN = vld1_u8(i5); i5 += 8;
      vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi4xGHIJKLMN);
      const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
      const uint8x8_t vi6x89ABCDEF = vld1_u8(i6); i6 += 8;
      vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi5x89ABCDEF);
      const uint8x8_t vi6xGHIJKLMN = vld1_u8(i6); i6 += 8;
      vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi5xGHIJKLMN);

      // Loads of the previous partial sums are interleaved with the last
      // widening adds to hide load latency (order preserved from generator).
      int32x4_t vacc0123 = vld1q_s32(b);
      int32x4_t vacc4567 = vld1q_s32(b + 4);
      vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
      int32x4_t vacc89AB = vld1q_s32(b + 8);
      int32x4_t vaccCDEF = vld1q_s32(b + 12);
      vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi6x89ABCDEF);
      int32x4_t vaccGHIJ = vld1q_s32(b + 16);
      int32x4_t vaccKLMN = vld1q_s32(b + 20);
      vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi6xGHIJKLMN);

      vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vsum01234567)));
      vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vsum01234567)));
      vacc89AB = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc89AB), vget_low_u16(vsum89ABCDEF)));
      vaccCDEF = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccCDEF), vget_high_u16(vsum89ABCDEF)));
      vaccGHIJ = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccGHIJ), vget_low_u16(vsumGHIJKLMN)));
      vaccKLMN = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccKLMN), vget_high_u16(vsumGHIJKLMN)));

      vst1q_s32(b, vacc0123); b += 4;
      vst1q_s32(b, vacc4567); b += 4;
      vst1q_s32(b, vacc89AB); b += 4;
      vst1q_s32(b, vaccCDEF); b += 4;
      vst1q_s32(b, vaccGHIJ); b += 4;
      vst1q_s32(b, vaccKLMN); b += 4;
    }
    // Middle-pass remainder: leftover channels in groups of 8.
    if XNN_UNLIKELY(c != 0) {
      do {
        const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
        const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;

        const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
        uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);

        const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
        vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
        const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
        vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
        const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
        vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
        const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
        vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);

        int32x4_t vacc0123 = vld1q_s32(b);
        int32x4_t vacc4567 = vld1q_s32(b + 4);
        vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);

        vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vsum01234567)));
        vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vsum01234567)));

        vst1q_s32(b, vacc0123); b += 4;
        vst1q_s32(b, vacc4567); b += 4;

        c = doz(c, 8);
      } while (c != 0);
    }
  }

  // Final pass: 1..7 rows remain; rows beyond `rows` are redirected to the
  // all-zero `zero` buffer so they contribute nothing to the sums.
  i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
  i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
  if XNN_UNPREDICTABLE(rows < 2) {
    i1 = zero;
  }
  i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 2) {
    i2 = zero;
  }
  i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
  if XNN_UNPREDICTABLE(rows < 4) {
    i3 = zero;
  }
  i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 4) {
    i4 = zero;
  }
  i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
  if XNN_UNPREDICTABLE(rows < 6) {
    i5 = zero;
  }
  i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 6) {
    i6 = zero;
  }

  // Requantization constants (fp32 "magic bias" scheme).
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
  const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
  const uint8x16_t voutput_min = vld1q_dup_u8(&params->fp32_neon.output_min);
  const uint8x16_t voutput_max = vld1q_dup_u8(&params->fp32_neon.output_max);

  for (; channels >= 24; channels -= 24) {
    const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
    const uint8x8_t vi0x89ABCDEF = vld1_u8(i0); i0 += 8;
    const uint8x8_t vi0xGHIJKLMN = vld1_u8(i0); i0 += 8;
    const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
    const uint8x8_t vi1x89ABCDEF = vld1_u8(i1); i1 += 8;
    const uint8x8_t vi1xGHIJKLMN = vld1_u8(i1); i1 += 8;

    const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
    uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
    const uint8x8_t vi2x89ABCDEF = vld1_u8(i2); i2 += 8;
    uint16x8_t vsum89ABCDEF = vaddl_u8(vi0x89ABCDEF, vi1x89ABCDEF);
    const uint8x8_t vi2xGHIJKLMN = vld1_u8(i2); i2 += 8;
    uint16x8_t vsumGHIJKLMN = vaddl_u8(vi0xGHIJKLMN, vi1xGHIJKLMN);

    const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
    const uint8x8_t vi3x89ABCDEF = vld1_u8(i3); i3 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi2x89ABCDEF);
    const uint8x8_t vi3xGHIJKLMN = vld1_u8(i3); i3 += 8;
    vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi2xGHIJKLMN);
    const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
    const uint8x8_t vi4x89ABCDEF = vld1_u8(i4); i4 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi3x89ABCDEF);
    const uint8x8_t vi4xGHIJKLMN = vld1_u8(i4); i4 += 8;
    vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi3xGHIJKLMN);
    const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
    const uint8x8_t vi5x89ABCDEF = vld1_u8(i5); i5 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi4x89ABCDEF);
    const uint8x8_t vi5xGHIJKLMN = vld1_u8(i5); i5 += 8;
    vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi4xGHIJKLMN);
    const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
    const uint8x8_t vi6x89ABCDEF = vld1_u8(i6); i6 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi5x89ABCDEF);
    const uint8x8_t vi6xGHIJKLMN = vld1_u8(i6); i6 += 8;
    vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi5xGHIJKLMN);

    int32x4_t vacc0123 = vld1q_s32(buffer); buffer += 4;
    int32x4_t vacc4567 = vld1q_s32(buffer); buffer += 4;
    vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
    int32x4_t vacc89AB = vld1q_s32(buffer); buffer += 4;
    int32x4_t vaccCDEF = vld1q_s32(buffer); buffer += 4;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi6x89ABCDEF);
    int32x4_t vaccGHIJ = vld1q_s32(buffer); buffer += 4;
    int32x4_t vaccKLMN = vld1q_s32(buffer); buffer += 4;
    vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi6xGHIJKLMN);

    vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vsum01234567)));
    vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vsum01234567)));
    vacc89AB = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc89AB), vget_low_u16(vsum89ABCDEF)));
    vaccCDEF = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccCDEF), vget_high_u16(vsum89ABCDEF)));
    vaccGHIJ = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccGHIJ), vget_low_u16(vsumGHIJKLMN)));
    vaccKLMN = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccKLMN), vget_high_u16(vsumGHIJKLMN)));

    // Requantize: int32 -> float, scale, add magic bias (places the rounded
    // integer in the mantissa), reinterpret as int32, subtract the bias
    // offset (saturating) to recover the zero-point-adjusted value.
    float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
    float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
    float32x4_t vfpacc89AB = vcvtq_f32_s32(vacc89AB);
    float32x4_t vfpaccCDEF = vcvtq_f32_s32(vaccCDEF);
    float32x4_t vfpaccGHIJ = vcvtq_f32_s32(vaccGHIJ);
    float32x4_t vfpaccKLMN = vcvtq_f32_s32(vaccKLMN);

    vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
    vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
    vfpacc89AB = vmulq_f32(vfpacc89AB, vscale);
    vfpaccCDEF = vmulq_f32(vfpaccCDEF, vscale);
    vfpaccGHIJ = vmulq_f32(vfpaccGHIJ, vscale);
    vfpaccKLMN = vmulq_f32(vfpaccKLMN, vscale);

    vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
    vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
    vacc89AB = vreinterpretq_s32_f32(vaddq_f32(vfpacc89AB, vmagic_bias));
    vaccCDEF = vreinterpretq_s32_f32(vaddq_f32(vfpaccCDEF, vmagic_bias));
    vaccGHIJ = vreinterpretq_s32_f32(vaddq_f32(vfpaccGHIJ, vmagic_bias));
    vaccKLMN = vreinterpretq_s32_f32(vaddq_f32(vfpaccKLMN, vmagic_bias));

    vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
    vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
    vacc89AB = vqsubq_s32(vacc89AB, vmagic_bias_less_output_zero_point);
    vaccCDEF = vqsubq_s32(vaccCDEF, vmagic_bias_less_output_zero_point);
    vaccGHIJ = vqsubq_s32(vaccGHIJ, vmagic_bias_less_output_zero_point);
    vaccKLMN = vqsubq_s32(vaccKLMN, vmagic_bias_less_output_zero_point);

    #if XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
      int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
      int16x8_t vaccGHIJKLMN = vqmovn_high_s32(vqmovn_s32(vaccGHIJ), vaccKLMN);
    #else  // !XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
      int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
      int16x8_t vaccGHIJKLMN = vcombine_s16(vqmovn_s32(vaccGHIJ), vqmovn_s32(vaccKLMN));
    #endif  // !XNN_ARCH_ARM64

    #if XNN_ARCH_ARM64
      uint8x16_t vout0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc01234567), vacc89ABCDEF);
      uint8x8_t voutGHIJKLMN = vqmovun_s16(vaccGHIJKLMN);
    #else  // !XNN_ARCH_ARM64
      uint8x16_t vout0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc01234567), vqmovun_s16(vacc89ABCDEF));
      uint8x8_t voutGHIJKLMN = vqmovun_s16(vaccGHIJKLMN);
    #endif  // !XNN_ARCH_ARM64

    vout0123456789ABCDEF = vmaxq_u8(vout0123456789ABCDEF, voutput_min);
    voutGHIJKLMN = vmax_u8(voutGHIJKLMN, vget_low_u8(voutput_min));
    vout0123456789ABCDEF = vminq_u8(vout0123456789ABCDEF, voutput_max);
    voutGHIJKLMN = vmin_u8(voutGHIJKLMN, vget_low_u8(voutput_max));

    vst1q_u8(output, vout0123456789ABCDEF); output += 16;
    vst1_u8(output, voutGHIJKLMN); output += 8;
  }

  // Remainder: process the trailing channels 8 at a time; the final partial
  // group is stored with 4/2/1-lane tail stores (XNN_OOB_READS permits the
  // over-reads on the loads).
  if XNN_UNLIKELY(channels != 0) {
    do {
      const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
      const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;

      const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
      uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);

      const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
      const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
      const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
      const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);

      int32x4_t vacc0123 = vld1q_s32(buffer); buffer += 4;
      int32x4_t vacc4567 = vld1q_s32(buffer); buffer += 4;
      vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);

      vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vsum01234567)));
      vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vsum01234567)));

      float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
      float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);

      vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
      vfpacc4567 = vmulq_f32(vfpacc4567, vscale);

      vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
      vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));

      vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
      vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);

      #if XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
      #else
        int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
      #endif

      uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
      vout01234567 = vmax_u8(vout01234567, vget_low_u8(voutput_min));
      vout01234567 = vmin_u8(vout01234567, vget_low_u8(voutput_max));

      if XNN_LIKELY(channels >= 8) {
        vst1_u8(output, vout01234567); output += 8;
        channels -= 8;
      } else {
        if (channels & 4) {
          vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
          vout01234567 = vext_u8(vout01234567, vout01234567, 4);
        }
        if (channels & 2) {
          vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
          vout01234567 = vext_u8(vout01234567, vout01234567, 2);
        }
        if (channels & 1) {
          vst1_lane_u8(output, vout01234567, 0); output += 1;
        }
        channels = 0;
      }
    } while (channels != 0);
  }
}
| 21,458
| 47.993151
| 130
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7p7x-minmax-fp32-neon-c8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
// Multipass ("7p7x") QU8 global average pooling microkernel for NEON,
// processing 8 channels per SIMD iteration with fp32 requantization via the
// magic-bias trick.
//
// Structure:
//   1. First pass: rows 0-6 are summed (seeded with params init_bias) into an
//      int32 scratch `buffer`.
//   2. Middle passes: each further full group of 7 rows is accumulated into
//      the buffer.
//   3. Final pass: the last 1-7 rows are added, the totals are scaled in
//      fp32, requantized, and clamped to [output_min, output_max].
//
// `zero` must point to an all-zero row; it substitutes for the unused row
// pointers in the final pass. `buffer` must hold round_up_po2(channels, 8)
// int32 accumulators. XNN_OOB_READS: vector loads may read past the last
// channel; those lanes are computed but never stored.
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__neon_c8(
    size_t rows,
    size_t channels,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    int32_t* buffer,
    uint8_t* output,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows > 7);  // rows <= 7 is handled by the single-pass (7x) kernel
  assert(channels != 0);

  // Row pointers for the current group of up to 7 input rows.
  const uint8_t* i0 = input;
  const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
  const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
  const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
  const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
  const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
  const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
  // Step from the end of one 7-row group to the start of the next: the
  // channel loop advances each pointer by round_up_po2(channels, 8) bytes.
  const size_t input_increment = 7 * input_stride - round_up_po2(channels, 8) * sizeof(uint8_t);

  const int32x4_t vinit_bias = vld1q_dup_s32(&params->fp32_neon.init_bias);
  int32_t* b = buffer;
  size_t c = channels;
  // First pass: sum rows 0-6 into the scratch buffer, 8 channels at a time.
  // doz() is a saturating (difference-or-zero) decrement, so a ragged tail
  // (channels % 8 != 0) also terminates the loop after one extra iteration.
  for (; c != 0; c = doz(c, 8)) {
    const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
    const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
    const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
    // Widen u8 -> u16 while summing; 7 rows of u8 cannot overflow u16.
    uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
    const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
    const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
    const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
    const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
    vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
    // Widen u16 -> 32-bit on top of the init_bias seed; the reinterpret casts
    // only change the element type, not the bits.
    const int32x4_t vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum01234567)));
    const int32x4_t vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum01234567)));
    vst1q_s32(b, vacc0123); b += 4;
    vst1q_s32(b, vacc4567); b += 4;
  }
  // Middle passes: accumulate each subsequent full group of 7 rows into the
  // buffer (read-modify-write), leaving 1-7 rows for the final pass.
  for (rows -= 7; rows > 7; rows -= 7) {
    i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
    i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
    i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
    i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
    i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
    i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
    i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);
    int32_t* b = buffer;
    size_t c = channels;
    for (; c != 0; c = doz(c, 8)) {
      const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
      const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
      const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
      uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
      const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
      const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
      const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
      const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
      // Load previous accumulators early, interleaved with the last add, to
      // hide load latency.
      int32x4_t vacc0123 = vld1q_s32(b);
      int32x4_t vacc4567 = vld1q_s32(b + 4);
      vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
      vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vsum01234567)));
      vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vsum01234567)));
      vst1q_s32(b, vacc0123); b += 4;
      vst1q_s32(b, vacc4567); b += 4;
    }
  }

  // Final pass: 1-7 rows remain. Row pointers beyond the remaining count are
  // redirected to the zero row so they contribute nothing to the sums.
  i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
  i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
  if XNN_UNPREDICTABLE(rows < 2) {
    i1 = zero;
  }
  i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 2) {
    i2 = zero;
  }
  i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
  if XNN_UNPREDICTABLE(rows < 4) {
    i3 = zero;
  }
  i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 4) {
    i4 = zero;
  }
  i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
  if XNN_UNPREDICTABLE(rows < 6) {
    i5 = zero;
  }
  i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 6) {
    i6 = zero;
  }

  // Requantization parameters for the fp32 magic-bias path.
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
  const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
  const uint8x8_t voutput_min = vld1_dup_u8(&params->fp32_neon.output_min);
  const uint8x8_t voutput_max = vld1_dup_u8(&params->fp32_neon.output_max);
  for (; channels >= 8; channels -= 8) {
    const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
    const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
    const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
    uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
    const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
    const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
    const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
    const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
    int32x4_t vacc0123 = vld1q_s32(buffer); buffer += 4;
    int32x4_t vacc4567 = vld1q_s32(buffer); buffer += 4;
    vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
    vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vsum01234567)));
    vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vsum01234567)));
    // Scale the int32 totals in fp32 (scale comes from params setup).
    float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
    float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
    vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
    vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
    // Magic-bias requantization: adding magic_bias leaves the rounded integer
    // in the low mantissa bits; the saturating subtraction then removes the
    // bias constant and folds in the output zero point in one step.
    vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
    vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
    vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
    vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
    // Narrow s32 -> s16 with saturation; A64 has a fused high-half variant.
    #if XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
    #else  // !XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
    #endif  // !XNN_ARCH_ARM64
    // Narrow s16 -> u8 with saturation (both branches identical at c == 8;
    // the split exists for template symmetry with wider channel tiles).
    #if XNN_ARCH_ARM64
      uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
    #else  // !XNN_ARCH_ARM64
      uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
    #endif  // !XNN_ARCH_ARM64
    // Clamp to the requested output range.
    vout01234567 = vmax_u8(vout01234567, voutput_min);
    vout01234567 = vmin_u8(vout01234567, voutput_max);
    vst1_u8(output, vout01234567); output += 8;
  }
  // Remainder: 1-7 trailing channels. Loads may overread (XNN_OOB_READS);
  // stores are broken down into 4/2/1-byte pieces so only valid channels are
  // written.
  if XNN_UNLIKELY(channels != 0) {
    {
      const uint8x8_t vi0x01234567 = vld1_u8(i0);
      const uint8x8_t vi1x01234567 = vld1_u8(i1);
      const uint8x8_t vi2x01234567 = vld1_u8(i2);
      uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
      const uint8x8_t vi3x01234567 = vld1_u8(i3);
      vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
      const uint8x8_t vi4x01234567 = vld1_u8(i4);
      vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
      const uint8x8_t vi5x01234567 = vld1_u8(i5);
      vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
      const uint8x8_t vi6x01234567 = vld1_u8(i6);
      vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
      int32x4_t vacc0123 = vld1q_s32(buffer); buffer += 4;
      int32x4_t vacc4567 = vld1q_s32(buffer); buffer += 4;
      vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
      vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vsum01234567)));
      vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vsum01234567)));
      float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
      float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
      vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
      vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
      vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
      vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
      vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
      vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
      #if XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
      #else
        int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
      #endif
      uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
      vout01234567 = vmax_u8(vout01234567, voutput_min);
      vout01234567 = vmin_u8(vout01234567, voutput_max);

      // Store the valid tail channels; vext rotates the consumed lanes out.
      if (channels & 4) {
        vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
        vout01234567 = vext_u8(vout01234567, vout01234567, 4);
      }
      if (channels & 2) {
        vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
        vout01234567 = vext_u8(vout01234567, vout01234567, 2);
      }
      if (channels & 1) {
        vst1_lane_u8(output, vout01234567, 0);
      }
    }
  }
}
| 10,205
| 40.319838
| 128
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7p7x-minmax-fp32-neonv8-c16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
// Multipass ("7p7x") QU8 global average pooling microkernel for NEON v8,
// processing 16 channels per SIMD iteration. Unlike the plain-NEON variant,
// requantization uses the ARMv8 round-to-nearest convert (vcvtnq_s32_f32)
// followed by a saturating add of the output zero point, instead of the
// magic-bias trick.
//
// Structure:
//   1. First pass: rows 0-6 are summed (seeded with params init_bias) into an
//      int32 scratch `buffer`.
//   2. Middle passes: each further full group of 7 rows is accumulated into
//      the buffer.
//   3. Final pass: the last 1-7 rows are added, then scaled, rounded, and
//      clamped to [output_min, output_max].
//
// `zero` must point to an all-zero row used to pad the final pass; `buffer`
// must hold round_up_po2(channels, 16) int32 accumulators. XNN_OOB_READS:
// vector loads may read past the last channel; those lanes are never stored.
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__neonv8_c16(
    size_t rows,
    size_t channels,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    int32_t* buffer,
    uint8_t* output,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows > 7);  // rows <= 7 is handled by the single-pass (7x) kernel
  assert(channels != 0);

  // Row pointers for the current group of up to 7 input rows.
  const uint8_t* i0 = input;
  const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
  const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
  const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
  const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
  const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
  const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
  // Step from the end of one 7-row group to the start of the next: the
  // channel loop advances each pointer by round_up_po2(channels, 16) bytes.
  const size_t input_increment = 7 * input_stride - round_up_po2(channels, 16) * sizeof(uint8_t);

  const int32x4_t vinit_bias = vld1q_dup_s32(&params->fp32_neonv8.init_bias);
  int32_t* b = buffer;
  size_t c = channels;
  // First pass: sum rows 0-6 into the scratch buffer, 16 channels at a time.
  // doz() is a saturating (difference-or-zero) decrement, so a ragged tail
  // (channels % 16 != 0) also terminates the loop.  Loads of the two 8-lane
  // halves are interleaved with the widening adds to hide latency.
  for (; c != 0; c = doz(c, 16)) {
    const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
    const uint8x8_t vi0x89ABCDEF = vld1_u8(i0); i0 += 8;
    const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
    const uint8x8_t vi1x89ABCDEF = vld1_u8(i1); i1 += 8;
    const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
    // Widen u8 -> u16 while summing; 7 rows of u8 cannot overflow u16.
    uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
    const uint8x8_t vi2x89ABCDEF = vld1_u8(i2); i2 += 8;
    uint16x8_t vsum89ABCDEF = vaddl_u8(vi0x89ABCDEF, vi1x89ABCDEF);
    const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
    const uint8x8_t vi3x89ABCDEF = vld1_u8(i3); i3 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi2x89ABCDEF);
    const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
    const uint8x8_t vi4x89ABCDEF = vld1_u8(i4); i4 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi3x89ABCDEF);
    const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
    const uint8x8_t vi5x89ABCDEF = vld1_u8(i5); i5 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi4x89ABCDEF);
    const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
    const uint8x8_t vi6x89ABCDEF = vld1_u8(i6); i6 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi5x89ABCDEF);
    vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi6x89ABCDEF);
    // Widen u16 -> 32-bit on top of the init_bias seed; the reinterpret casts
    // only change the element type, not the bits.
    const int32x4_t vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum01234567)));
    const int32x4_t vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum01234567)));
    const int32x4_t vacc89AB = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum89ABCDEF)));
    const int32x4_t vaccCDEF = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum89ABCDEF)));
    vst1q_s32(b, vacc0123); b += 4;
    vst1q_s32(b, vacc4567); b += 4;
    vst1q_s32(b, vacc89AB); b += 4;
    vst1q_s32(b, vaccCDEF); b += 4;
  }
  // Middle passes: accumulate each subsequent full group of 7 rows into the
  // buffer (read-modify-write), leaving 1-7 rows for the final pass.
  for (rows -= 7; rows > 7; rows -= 7) {
    i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
    i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
    i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
    i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
    i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
    i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
    i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);
    int32_t* b = buffer;
    size_t c = channels;
    for (; c != 0; c = doz(c, 16)) {
      const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
      const uint8x8_t vi0x89ABCDEF = vld1_u8(i0); i0 += 8;
      const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
      const uint8x8_t vi1x89ABCDEF = vld1_u8(i1); i1 += 8;
      const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
      uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
      const uint8x8_t vi2x89ABCDEF = vld1_u8(i2); i2 += 8;
      uint16x8_t vsum89ABCDEF = vaddl_u8(vi0x89ABCDEF, vi1x89ABCDEF);
      const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
      const uint8x8_t vi3x89ABCDEF = vld1_u8(i3); i3 += 8;
      vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi2x89ABCDEF);
      const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
      const uint8x8_t vi4x89ABCDEF = vld1_u8(i4); i4 += 8;
      vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi3x89ABCDEF);
      const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
      const uint8x8_t vi5x89ABCDEF = vld1_u8(i5); i5 += 8;
      vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi4x89ABCDEF);
      const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
      const uint8x8_t vi6x89ABCDEF = vld1_u8(i6); i6 += 8;
      vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi5x89ABCDEF);
      // Load previous accumulators early, interleaved with the final adds,
      // to hide load latency.
      int32x4_t vacc0123 = vld1q_s32(b);
      int32x4_t vacc4567 = vld1q_s32(b + 4);
      vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
      int32x4_t vacc89AB = vld1q_s32(b + 8);
      int32x4_t vaccCDEF = vld1q_s32(b + 12);
      vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi6x89ABCDEF);
      vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vsum01234567)));
      vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vsum01234567)));
      vacc89AB = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc89AB), vget_low_u16(vsum89ABCDEF)));
      vaccCDEF = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccCDEF), vget_high_u16(vsum89ABCDEF)));
      vst1q_s32(b, vacc0123); b += 4;
      vst1q_s32(b, vacc4567); b += 4;
      vst1q_s32(b, vacc89AB); b += 4;
      vst1q_s32(b, vaccCDEF); b += 4;
    }
  }

  // Final pass: 1-7 rows remain. Row pointers beyond the remaining count are
  // redirected to the zero row so they contribute nothing to the sums.
  i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
  i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
  if XNN_UNPREDICTABLE(rows < 2) {
    i1 = zero;
  }
  i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 2) {
    i2 = zero;
  }
  i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
  if XNN_UNPREDICTABLE(rows < 4) {
    i3 = zero;
  }
  i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 4) {
    i4 = zero;
  }
  i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
  if XNN_UNPREDICTABLE(rows < 6) {
    i5 = zero;
  }
  i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 6) {
    i6 = zero;
  }

  // Requantization parameters for the ARMv8 rounding-convert path.
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
  const uint8x16_t voutput_min = vld1q_dup_u8(&params->fp32_neonv8.output_min);
  const uint8x16_t voutput_max = vld1q_dup_u8(&params->fp32_neonv8.output_max);
  for (; channels >= 16; channels -= 16) {
    const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
    const uint8x8_t vi0x89ABCDEF = vld1_u8(i0); i0 += 8;
    const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
    const uint8x8_t vi1x89ABCDEF = vld1_u8(i1); i1 += 8;
    const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
    uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
    const uint8x8_t vi2x89ABCDEF = vld1_u8(i2); i2 += 8;
    uint16x8_t vsum89ABCDEF = vaddl_u8(vi0x89ABCDEF, vi1x89ABCDEF);
    const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
    const uint8x8_t vi3x89ABCDEF = vld1_u8(i3); i3 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi2x89ABCDEF);
    const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
    const uint8x8_t vi4x89ABCDEF = vld1_u8(i4); i4 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi3x89ABCDEF);
    const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
    const uint8x8_t vi5x89ABCDEF = vld1_u8(i5); i5 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi4x89ABCDEF);
    const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
    const uint8x8_t vi6x89ABCDEF = vld1_u8(i6); i6 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi5x89ABCDEF);
    // Consume the accumulated totals from the scratch buffer.
    int32x4_t vacc0123 = vld1q_s32(buffer); buffer += 4;
    int32x4_t vacc4567 = vld1q_s32(buffer); buffer += 4;
    vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
    int32x4_t vacc89AB = vld1q_s32(buffer); buffer += 4;
    int32x4_t vaccCDEF = vld1q_s32(buffer); buffer += 4;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi6x89ABCDEF);
    vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vsum01234567)));
    vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vsum01234567)));
    vacc89AB = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc89AB), vget_low_u16(vsum89ABCDEF)));
    vaccCDEF = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccCDEF), vget_high_u16(vsum89ABCDEF)));
    // Scale the int32 totals in fp32 (scale comes from params setup).
    float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
    float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
    float32x4_t vfpacc89AB = vcvtq_f32_s32(vacc89AB);
    float32x4_t vfpaccCDEF = vcvtq_f32_s32(vaccCDEF);
    vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
    vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
    vfpacc89AB = vmulq_f32(vfpacc89AB, vscale);
    vfpaccCDEF = vmulq_f32(vfpaccCDEF, vscale);
    // ARMv8 FCVTNS: convert fp32 -> s32 with round-to-nearest-even.
    vacc0123 = vcvtnq_s32_f32(vfpacc0123);
    vacc4567 = vcvtnq_s32_f32(vfpacc4567);
    vacc89AB = vcvtnq_s32_f32(vfpacc89AB);
    vaccCDEF = vcvtnq_s32_f32(vfpaccCDEF);
    // Narrow s32 -> s16 with saturation; A64 has a fused high-half variant.
    #if XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
      int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
    #else  // !XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
      int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
    #endif  // !XNN_ARCH_ARM64
    // Add the output zero point with s16 saturation, then narrow to u8.
    vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
    vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
    #if XNN_ARCH_ARM64
      uint8x16_t vout0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc01234567), vacc89ABCDEF);
    #else  // !XNN_ARCH_ARM64
      uint8x16_t vout0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc01234567), vqmovun_s16(vacc89ABCDEF));
    #endif  // !XNN_ARCH_ARM64
    // Clamp to the requested output range.
    vout0123456789ABCDEF = vmaxq_u8(vout0123456789ABCDEF, voutput_min);
    vout0123456789ABCDEF = vminq_u8(vout0123456789ABCDEF, voutput_max);
    vst1q_u8(output, vout0123456789ABCDEF); output += 16;
  }
  // Remainder: 1-15 trailing channels, processed 8 at a time. Loads may
  // overread (XNN_OOB_READS); sub-8 stores are split into 4/2/1-byte pieces
  // so only valid channels are written.
  if XNN_UNLIKELY(channels != 0) {
    do {
      const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
      const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
      const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
      uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
      const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
      const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
      const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
      const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
      int32x4_t vacc0123 = vld1q_s32(buffer); buffer += 4;
      int32x4_t vacc4567 = vld1q_s32(buffer); buffer += 4;
      vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
      vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vsum01234567)));
      vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vsum01234567)));
      float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
      float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
      vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
      vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
      vacc0123 = vcvtnq_s32_f32(vfpacc0123);
      vacc4567 = vcvtnq_s32_f32(vfpacc4567);
      #if XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
      #else
        int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
      #endif
      vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
      uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
      // Clamp with the low halves of the 128-bit min/max vectors.
      vout01234567 = vmax_u8(vout01234567, vget_low_u8(voutput_min));
      vout01234567 = vmin_u8(vout01234567, vget_low_u8(voutput_max));

      if XNN_LIKELY(channels >= 8) {
        vst1_u8(output, vout01234567); output += 8;
        channels -= 8;
      } else {
        // Store the valid tail channels; vext rotates consumed lanes out.
        if (channels & 4) {
          vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
          vout01234567 = vext_u8(vout01234567, vout01234567, 4);
        }
        if (channels & 2) {
          vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
          vout01234567 = vext_u8(vout01234567, vout01234567, 2);
        }
        if (channels & 1) {
          vst1_lane_u8(output, vout01234567, 0); output += 1;
        }
        channels = 0;
      }
    } while (channels != 0);
  }
}
| 14,169
| 44.562701
| 128
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7p7x-minmax-fp32-neonv8-c24.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__neonv8_c24(
size_t rows,
size_t channels,
const uint8_t* input,
size_t input_stride,
const uint8_t* zero,
int32_t* buffer,
uint8_t* output,
const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows > 7);
assert(channels != 0);
const uint8_t* i0 = input;
const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
const size_t input_increment = 7 * input_stride - round_up_po2(channels, 8) * sizeof(uint8_t);
const int32x4_t vinit_bias = vld1q_dup_s32(¶ms->fp32_neonv8.init_bias);
int32_t* b = buffer;
size_t c = channels;
for (; c >= 24; c -= 24) {
const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
const uint8x8_t vi0x89ABCDEF = vld1_u8(i0); i0 += 8;
const uint8x8_t vi0xGHIJKLMN = vld1_u8(i0); i0 += 8;
const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
const uint8x8_t vi1x89ABCDEF = vld1_u8(i1); i1 += 8;
const uint8x8_t vi1xGHIJKLMN = vld1_u8(i1); i1 += 8;
const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
const uint8x8_t vi2x89ABCDEF = vld1_u8(i2); i2 += 8;
uint16x8_t vsum89ABCDEF = vaddl_u8(vi0x89ABCDEF, vi1x89ABCDEF);
const uint8x8_t vi2xGHIJKLMN = vld1_u8(i2); i2 += 8;
uint16x8_t vsumGHIJKLMN = vaddl_u8(vi0xGHIJKLMN, vi1xGHIJKLMN);
const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
const uint8x8_t vi3x89ABCDEF = vld1_u8(i3); i3 += 8;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi2x89ABCDEF);
const uint8x8_t vi3xGHIJKLMN = vld1_u8(i3); i3 += 8;
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi2xGHIJKLMN);
const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
const uint8x8_t vi4x89ABCDEF = vld1_u8(i4); i4 += 8;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi3x89ABCDEF);
const uint8x8_t vi4xGHIJKLMN = vld1_u8(i4); i4 += 8;
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi3xGHIJKLMN);
const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
const uint8x8_t vi5x89ABCDEF = vld1_u8(i5); i5 += 8;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi4x89ABCDEF);
const uint8x8_t vi5xGHIJKLMN = vld1_u8(i5); i5 += 8;
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi4xGHIJKLMN);
const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
const uint8x8_t vi6x89ABCDEF = vld1_u8(i6); i6 += 8;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi5x89ABCDEF);
const uint8x8_t vi6xGHIJKLMN = vld1_u8(i6); i6 += 8;
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi5xGHIJKLMN);
vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi6x89ABCDEF);
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi6xGHIJKLMN);
const int32x4_t vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum01234567)));
const int32x4_t vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum01234567)));
const int32x4_t vacc89AB = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum89ABCDEF)));
const int32x4_t vaccCDEF = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum89ABCDEF)));
const int32x4_t vaccGHIJ = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsumGHIJKLMN)));
const int32x4_t vaccKLMN = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsumGHIJKLMN)));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
vst1q_s32(b, vacc89AB); b += 4;
vst1q_s32(b, vaccCDEF); b += 4;
vst1q_s32(b, vaccGHIJ); b += 4;
vst1q_s32(b, vaccKLMN); b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
const int32x4_t vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum01234567)));
const int32x4_t vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum01234567)));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
c = doz(c, 8);
} while (c != 0);
}
for (rows -= 7; rows > 7; rows -= 7) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);
int32_t* b = buffer;
size_t c = channels;
for (; c >= 24; c -= 24) {
const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
const uint8x8_t vi0x89ABCDEF = vld1_u8(i0); i0 += 8;
const uint8x8_t vi0xGHIJKLMN = vld1_u8(i0); i0 += 8;
const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
const uint8x8_t vi1x89ABCDEF = vld1_u8(i1); i1 += 8;
const uint8x8_t vi1xGHIJKLMN = vld1_u8(i1); i1 += 8;
const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
const uint8x8_t vi2x89ABCDEF = vld1_u8(i2); i2 += 8;
uint16x8_t vsum89ABCDEF = vaddl_u8(vi0x89ABCDEF, vi1x89ABCDEF);
const uint8x8_t vi2xGHIJKLMN = vld1_u8(i2); i2 += 8;
uint16x8_t vsumGHIJKLMN = vaddl_u8(vi0xGHIJKLMN, vi1xGHIJKLMN);
const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
const uint8x8_t vi3x89ABCDEF = vld1_u8(i3); i3 += 8;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi2x89ABCDEF);
const uint8x8_t vi3xGHIJKLMN = vld1_u8(i3); i3 += 8;
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi2xGHIJKLMN);
const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
const uint8x8_t vi4x89ABCDEF = vld1_u8(i4); i4 += 8;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi3x89ABCDEF);
const uint8x8_t vi4xGHIJKLMN = vld1_u8(i4); i4 += 8;
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi3xGHIJKLMN);
const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
const uint8x8_t vi5x89ABCDEF = vld1_u8(i5); i5 += 8;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi4x89ABCDEF);
const uint8x8_t vi5xGHIJKLMN = vld1_u8(i5); i5 += 8;
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi4xGHIJKLMN);
const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
const uint8x8_t vi6x89ABCDEF = vld1_u8(i6); i6 += 8;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi5x89ABCDEF);
const uint8x8_t vi6xGHIJKLMN = vld1_u8(i6); i6 += 8;
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi5xGHIJKLMN);
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
int32x4_t vacc89AB = vld1q_s32(b + 8);
int32x4_t vaccCDEF = vld1q_s32(b + 12);
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi6x89ABCDEF);
int32x4_t vaccGHIJ = vld1q_s32(b + 16);
int32x4_t vaccKLMN = vld1q_s32(b + 20);
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi6xGHIJKLMN);
vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vsum01234567)));
vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vsum01234567)));
vacc89AB = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc89AB), vget_low_u16(vsum89ABCDEF)));
vaccCDEF = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccCDEF), vget_high_u16(vsum89ABCDEF)));
vaccGHIJ = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccGHIJ), vget_low_u16(vsumGHIJKLMN)));
vaccKLMN = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccKLMN), vget_high_u16(vsumGHIJKLMN)));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
vst1q_s32(b, vacc89AB); b += 4;
vst1q_s32(b, vaccCDEF); b += 4;
vst1q_s32(b, vaccGHIJ); b += 4;
vst1q_s32(b, vaccKLMN); b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vsum01234567)));
vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vsum01234567)));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
c = doz(c, 8);
} while (c != 0);
}
}
i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const float32x4_t vscale = vld1q_dup_f32(¶ms->fp32_neonv8.scale);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->fp32_neonv8.output_zero_point);
const uint8x16_t voutput_min = vld1q_dup_u8(¶ms->fp32_neonv8.output_min);
const uint8x16_t voutput_max = vld1q_dup_u8(¶ms->fp32_neonv8.output_max);
for (; channels >= 24; channels -= 24) {
const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
const uint8x8_t vi0x89ABCDEF = vld1_u8(i0); i0 += 8;
const uint8x8_t vi0xGHIJKLMN = vld1_u8(i0); i0 += 8;
const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
const uint8x8_t vi1x89ABCDEF = vld1_u8(i1); i1 += 8;
const uint8x8_t vi1xGHIJKLMN = vld1_u8(i1); i1 += 8;
const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
const uint8x8_t vi2x89ABCDEF = vld1_u8(i2); i2 += 8;
uint16x8_t vsum89ABCDEF = vaddl_u8(vi0x89ABCDEF, vi1x89ABCDEF);
const uint8x8_t vi2xGHIJKLMN = vld1_u8(i2); i2 += 8;
uint16x8_t vsumGHIJKLMN = vaddl_u8(vi0xGHIJKLMN, vi1xGHIJKLMN);
const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
const uint8x8_t vi3x89ABCDEF = vld1_u8(i3); i3 += 8;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi2x89ABCDEF);
const uint8x8_t vi3xGHIJKLMN = vld1_u8(i3); i3 += 8;
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi2xGHIJKLMN);
const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
const uint8x8_t vi4x89ABCDEF = vld1_u8(i4); i4 += 8;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi3x89ABCDEF);
const uint8x8_t vi4xGHIJKLMN = vld1_u8(i4); i4 += 8;
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi3xGHIJKLMN);
const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
const uint8x8_t vi5x89ABCDEF = vld1_u8(i5); i5 += 8;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi4x89ABCDEF);
const uint8x8_t vi5xGHIJKLMN = vld1_u8(i5); i5 += 8;
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi4xGHIJKLMN);
const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
const uint8x8_t vi6x89ABCDEF = vld1_u8(i6); i6 += 8;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi5x89ABCDEF);
const uint8x8_t vi6xGHIJKLMN = vld1_u8(i6); i6 += 8;
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi5xGHIJKLMN);
int32x4_t vacc0123 = vld1q_s32(buffer); buffer += 4;
int32x4_t vacc4567 = vld1q_s32(buffer); buffer += 4;
vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
int32x4_t vacc89AB = vld1q_s32(buffer); buffer += 4;
int32x4_t vaccCDEF = vld1q_s32(buffer); buffer += 4;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi6x89ABCDEF);
int32x4_t vaccGHIJ = vld1q_s32(buffer); buffer += 4;
int32x4_t vaccKLMN = vld1q_s32(buffer); buffer += 4;
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi6xGHIJKLMN);
vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vsum01234567)));
vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vsum01234567)));
vacc89AB = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc89AB), vget_low_u16(vsum89ABCDEF)));
vaccCDEF = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccCDEF), vget_high_u16(vsum89ABCDEF)));
vaccGHIJ = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccGHIJ), vget_low_u16(vsumGHIJKLMN)));
vaccKLMN = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccKLMN), vget_high_u16(vsumGHIJKLMN)));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
float32x4_t vfpacc89AB = vcvtq_f32_s32(vacc89AB);
float32x4_t vfpaccCDEF = vcvtq_f32_s32(vaccCDEF);
float32x4_t vfpaccGHIJ = vcvtq_f32_s32(vaccGHIJ);
float32x4_t vfpaccKLMN = vcvtq_f32_s32(vaccKLMN);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vfpacc89AB = vmulq_f32(vfpacc89AB, vscale);
vfpaccCDEF = vmulq_f32(vfpaccCDEF, vscale);
vfpaccGHIJ = vmulq_f32(vfpaccGHIJ, vscale);
vfpaccKLMN = vmulq_f32(vfpaccKLMN, vscale);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
vacc89AB = vcvtnq_s32_f32(vfpacc89AB);
vaccCDEF = vcvtnq_s32_f32(vfpaccCDEF);
vaccGHIJ = vcvtnq_s32_f32(vfpaccGHIJ);
vaccKLMN = vcvtnq_s32_f32(vfpaccKLMN);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
int16x8_t vaccGHIJKLMN = vqmovn_high_s32(vqmovn_s32(vaccGHIJ), vaccKLMN);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
int16x8_t vaccGHIJKLMN = vcombine_s16(vqmovn_s32(vaccGHIJ), vqmovn_s32(vaccKLMN));
#endif // !XNN_ARCH_ARM64
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
vaccGHIJKLMN = vqaddq_s16(vaccGHIJKLMN, voutput_zero_point);
#if XNN_ARCH_ARM64
uint8x16_t vout0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc01234567), vacc89ABCDEF);
uint8x8_t voutGHIJKLMN = vqmovun_s16(vaccGHIJKLMN);
#else // !XNN_ARCH_ARM64
uint8x16_t vout0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc01234567), vqmovun_s16(vacc89ABCDEF));
uint8x8_t voutGHIJKLMN = vqmovun_s16(vaccGHIJKLMN);
#endif // !XNN_ARCH_ARM64
vout0123456789ABCDEF = vmaxq_u8(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMN = vmax_u8(voutGHIJKLMN, vget_low_u8(voutput_min));
vout0123456789ABCDEF = vminq_u8(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMN = vmin_u8(voutGHIJKLMN, vget_low_u8(voutput_max));
vst1q_u8(output, vout0123456789ABCDEF); output += 16;
vst1_u8(output, voutGHIJKLMN); output += 8;
}
if XNN_UNLIKELY(channels != 0) {
do {
const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
int32x4_t vacc0123 = vld1q_s32(buffer); buffer += 4;
int32x4_t vacc4567 = vld1q_s32(buffer); buffer += 4;
vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vsum01234567)));
vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vsum01234567)));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
vout01234567 = vmax_u8(vout01234567, vget_low_u8(voutput_min));
vout01234567 = vmin_u8(vout01234567, vget_low_u8(voutput_max));
if XNN_LIKELY(channels >= 8) {
vst1_u8(output, vout01234567); output += 8;
channels -= 8;
} else {
if (channels & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
vout01234567 = vext_u8(vout01234567, vout01234567, 4);
}
if (channels & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
vout01234567 = vext_u8(vout01234567, vout01234567, 2);
}
if (channels & 1) {
vst1_lane_u8(output, vout01234567, 0); output += 1;
}
channels = 0;
}
} while (channels != 0);
}
}
| 20,823
| 47.203704
| 130
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7p7x-minmax-fp32-neonv8-c32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__neonv8_c32(
size_t rows,
size_t channels,
const uint8_t* input,
size_t input_stride,
const uint8_t* zero,
int32_t* buffer,
uint8_t* output,
const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows > 7);
assert(channels != 0);
const uint8_t* i0 = input;
const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
const size_t input_increment = 7 * input_stride - round_up_po2(channels, 8) * sizeof(uint8_t);
const int32x4_t vinit_bias = vld1q_dup_s32(¶ms->fp32_neonv8.init_bias);
int32_t* b = buffer;
size_t c = channels;
for (; c >= 32; c -= 32) {
const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
const uint8x8_t vi0x89ABCDEF = vld1_u8(i0); i0 += 8;
const uint8x8_t vi0xGHIJKLMN = vld1_u8(i0); i0 += 8;
const uint8x8_t vi0xOPQRSTUV = vld1_u8(i0); i0 += 8;
const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
const uint8x8_t vi1x89ABCDEF = vld1_u8(i1); i1 += 8;
const uint8x8_t vi1xGHIJKLMN = vld1_u8(i1); i1 += 8;
const uint8x8_t vi1xOPQRSTUV = vld1_u8(i1); i1 += 8;
const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
const uint8x8_t vi2x89ABCDEF = vld1_u8(i2); i2 += 8;
uint16x8_t vsum89ABCDEF = vaddl_u8(vi0x89ABCDEF, vi1x89ABCDEF);
const uint8x8_t vi2xGHIJKLMN = vld1_u8(i2); i2 += 8;
uint16x8_t vsumGHIJKLMN = vaddl_u8(vi0xGHIJKLMN, vi1xGHIJKLMN);
const uint8x8_t vi2xOPQRSTUV = vld1_u8(i2); i2 += 8;
uint16x8_t vsumOPQRSTUV = vaddl_u8(vi0xOPQRSTUV, vi1xOPQRSTUV);
const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
const uint8x8_t vi3x89ABCDEF = vld1_u8(i3); i3 += 8;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi2x89ABCDEF);
const uint8x8_t vi3xGHIJKLMN = vld1_u8(i3); i3 += 8;
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi2xGHIJKLMN);
const uint8x8_t vi3xOPQRSTUV = vld1_u8(i3); i3 += 8;
vsumOPQRSTUV = vaddw_u8(vsumOPQRSTUV, vi2xOPQRSTUV);
const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
const uint8x8_t vi4x89ABCDEF = vld1_u8(i4); i4 += 8;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi3x89ABCDEF);
const uint8x8_t vi4xGHIJKLMN = vld1_u8(i4); i4 += 8;
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi3xGHIJKLMN);
const uint8x8_t vi4xOPQRSTUV = vld1_u8(i4); i4 += 8;
vsumOPQRSTUV = vaddw_u8(vsumOPQRSTUV, vi3xOPQRSTUV);
const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
const uint8x8_t vi5x89ABCDEF = vld1_u8(i5); i5 += 8;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi4x89ABCDEF);
const uint8x8_t vi5xGHIJKLMN = vld1_u8(i5); i5 += 8;
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi4xGHIJKLMN);
const uint8x8_t vi5xOPQRSTUV = vld1_u8(i5); i5 += 8;
vsumOPQRSTUV = vaddw_u8(vsumOPQRSTUV, vi4xOPQRSTUV);
const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
const uint8x8_t vi6x89ABCDEF = vld1_u8(i6); i6 += 8;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi5x89ABCDEF);
const uint8x8_t vi6xGHIJKLMN = vld1_u8(i6); i6 += 8;
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi5xGHIJKLMN);
const uint8x8_t vi6xOPQRSTUV = vld1_u8(i6); i6 += 8;
vsumOPQRSTUV = vaddw_u8(vsumOPQRSTUV, vi5xOPQRSTUV);
vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi6x89ABCDEF);
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi6xGHIJKLMN);
vsumOPQRSTUV = vaddw_u8(vsumOPQRSTUV, vi6xOPQRSTUV);
const int32x4_t vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum01234567)));
const int32x4_t vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum01234567)));
const int32x4_t vacc89AB = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum89ABCDEF)));
const int32x4_t vaccCDEF = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum89ABCDEF)));
const int32x4_t vaccGHIJ = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsumGHIJKLMN)));
const int32x4_t vaccKLMN = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsumGHIJKLMN)));
const int32x4_t vaccOPQR = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsumOPQRSTUV)));
const int32x4_t vaccSTUV = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsumOPQRSTUV)));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
vst1q_s32(b, vacc89AB); b += 4;
vst1q_s32(b, vaccCDEF); b += 4;
vst1q_s32(b, vaccGHIJ); b += 4;
vst1q_s32(b, vaccKLMN); b += 4;
vst1q_s32(b, vaccOPQR); b += 4;
vst1q_s32(b, vaccSTUV); b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
const int32x4_t vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum01234567)));
const int32x4_t vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum01234567)));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
c = doz(c, 8);
} while (c != 0);
}
for (rows -= 7; rows > 7; rows -= 7) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);
int32_t* b = buffer;
size_t c = channels;
for (; c >= 32; c -= 32) {
const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
const uint8x8_t vi0x89ABCDEF = vld1_u8(i0); i0 += 8;
const uint8x8_t vi0xGHIJKLMN = vld1_u8(i0); i0 += 8;
const uint8x8_t vi0xOPQRSTUV = vld1_u8(i0); i0 += 8;
const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
const uint8x8_t vi1x89ABCDEF = vld1_u8(i1); i1 += 8;
const uint8x8_t vi1xGHIJKLMN = vld1_u8(i1); i1 += 8;
const uint8x8_t vi1xOPQRSTUV = vld1_u8(i1); i1 += 8;
const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
const uint8x8_t vi2x89ABCDEF = vld1_u8(i2); i2 += 8;
uint16x8_t vsum89ABCDEF = vaddl_u8(vi0x89ABCDEF, vi1x89ABCDEF);
const uint8x8_t vi2xGHIJKLMN = vld1_u8(i2); i2 += 8;
uint16x8_t vsumGHIJKLMN = vaddl_u8(vi0xGHIJKLMN, vi1xGHIJKLMN);
const uint8x8_t vi2xOPQRSTUV = vld1_u8(i2); i2 += 8;
uint16x8_t vsumOPQRSTUV = vaddl_u8(vi0xOPQRSTUV, vi1xOPQRSTUV);
const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
const uint8x8_t vi3x89ABCDEF = vld1_u8(i3); i3 += 8;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi2x89ABCDEF);
const uint8x8_t vi3xGHIJKLMN = vld1_u8(i3); i3 += 8;
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi2xGHIJKLMN);
const uint8x8_t vi3xOPQRSTUV = vld1_u8(i3); i3 += 8;
vsumOPQRSTUV = vaddw_u8(vsumOPQRSTUV, vi2xOPQRSTUV);
const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
const uint8x8_t vi4x89ABCDEF = vld1_u8(i4); i4 += 8;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi3x89ABCDEF);
const uint8x8_t vi4xGHIJKLMN = vld1_u8(i4); i4 += 8;
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi3xGHIJKLMN);
const uint8x8_t vi4xOPQRSTUV = vld1_u8(i4); i4 += 8;
vsumOPQRSTUV = vaddw_u8(vsumOPQRSTUV, vi3xOPQRSTUV);
const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
const uint8x8_t vi5x89ABCDEF = vld1_u8(i5); i5 += 8;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi4x89ABCDEF);
const uint8x8_t vi5xGHIJKLMN = vld1_u8(i5); i5 += 8;
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi4xGHIJKLMN);
const uint8x8_t vi5xOPQRSTUV = vld1_u8(i5); i5 += 8;
vsumOPQRSTUV = vaddw_u8(vsumOPQRSTUV, vi4xOPQRSTUV);
const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
const uint8x8_t vi6x89ABCDEF = vld1_u8(i6); i6 += 8;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi5x89ABCDEF);
const uint8x8_t vi6xGHIJKLMN = vld1_u8(i6); i6 += 8;
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi5xGHIJKLMN);
const uint8x8_t vi6xOPQRSTUV = vld1_u8(i6); i6 += 8;
vsumOPQRSTUV = vaddw_u8(vsumOPQRSTUV, vi5xOPQRSTUV);
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
int32x4_t vacc89AB = vld1q_s32(b + 8);
int32x4_t vaccCDEF = vld1q_s32(b + 12);
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi6x89ABCDEF);
int32x4_t vaccGHIJ = vld1q_s32(b + 16);
int32x4_t vaccKLMN = vld1q_s32(b + 20);
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi6xGHIJKLMN);
int32x4_t vaccOPQR = vld1q_s32(b + 24);
int32x4_t vaccSTUV = vld1q_s32(b + 28);
vsumOPQRSTUV = vaddw_u8(vsumOPQRSTUV, vi6xOPQRSTUV);
vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vsum01234567)));
vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vsum01234567)));
vacc89AB = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc89AB), vget_low_u16(vsum89ABCDEF)));
vaccCDEF = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccCDEF), vget_high_u16(vsum89ABCDEF)));
vaccGHIJ = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccGHIJ), vget_low_u16(vsumGHIJKLMN)));
vaccKLMN = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccKLMN), vget_high_u16(vsumGHIJKLMN)));
vaccOPQR = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccOPQR), vget_low_u16(vsumOPQRSTUV)));
vaccSTUV = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccSTUV), vget_high_u16(vsumOPQRSTUV)));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
vst1q_s32(b, vacc89AB); b += 4;
vst1q_s32(b, vaccCDEF); b += 4;
vst1q_s32(b, vaccGHIJ); b += 4;
vst1q_s32(b, vaccKLMN); b += 4;
vst1q_s32(b, vaccOPQR); b += 4;
vst1q_s32(b, vaccSTUV); b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vsum01234567)));
vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vsum01234567)));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
c = doz(c, 8);
} while (c != 0);
}
}
i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const float32x4_t vscale = vld1q_dup_f32(¶ms->fp32_neonv8.scale);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->fp32_neonv8.output_zero_point);
const uint8x16_t voutput_min = vld1q_dup_u8(¶ms->fp32_neonv8.output_min);
const uint8x16_t voutput_max = vld1q_dup_u8(¶ms->fp32_neonv8.output_max);
for (; channels >= 32; channels -= 32) {
const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
const uint8x8_t vi0x89ABCDEF = vld1_u8(i0); i0 += 8;
const uint8x8_t vi0xGHIJKLMN = vld1_u8(i0); i0 += 8;
const uint8x8_t vi0xOPQRSTUV = vld1_u8(i0); i0 += 8;
const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
const uint8x8_t vi1x89ABCDEF = vld1_u8(i1); i1 += 8;
const uint8x8_t vi1xGHIJKLMN = vld1_u8(i1); i1 += 8;
const uint8x8_t vi1xOPQRSTUV = vld1_u8(i1); i1 += 8;
const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
const uint8x8_t vi2x89ABCDEF = vld1_u8(i2); i2 += 8;
uint16x8_t vsum89ABCDEF = vaddl_u8(vi0x89ABCDEF, vi1x89ABCDEF);
const uint8x8_t vi2xGHIJKLMN = vld1_u8(i2); i2 += 8;
uint16x8_t vsumGHIJKLMN = vaddl_u8(vi0xGHIJKLMN, vi1xGHIJKLMN);
const uint8x8_t vi2xOPQRSTUV = vld1_u8(i2); i2 += 8;
uint16x8_t vsumOPQRSTUV = vaddl_u8(vi0xOPQRSTUV, vi1xOPQRSTUV);
const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
const uint8x8_t vi3x89ABCDEF = vld1_u8(i3); i3 += 8;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi2x89ABCDEF);
const uint8x8_t vi3xGHIJKLMN = vld1_u8(i3); i3 += 8;
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi2xGHIJKLMN);
const uint8x8_t vi3xOPQRSTUV = vld1_u8(i3); i3 += 8;
vsumOPQRSTUV = vaddw_u8(vsumOPQRSTUV, vi2xOPQRSTUV);
const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
const uint8x8_t vi4x89ABCDEF = vld1_u8(i4); i4 += 8;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi3x89ABCDEF);
const uint8x8_t vi4xGHIJKLMN = vld1_u8(i4); i4 += 8;
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi3xGHIJKLMN);
const uint8x8_t vi4xOPQRSTUV = vld1_u8(i4); i4 += 8;
vsumOPQRSTUV = vaddw_u8(vsumOPQRSTUV, vi3xOPQRSTUV);
const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
const uint8x8_t vi5x89ABCDEF = vld1_u8(i5); i5 += 8;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi4x89ABCDEF);
const uint8x8_t vi5xGHIJKLMN = vld1_u8(i5); i5 += 8;
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi4xGHIJKLMN);
const uint8x8_t vi5xOPQRSTUV = vld1_u8(i5); i5 += 8;
vsumOPQRSTUV = vaddw_u8(vsumOPQRSTUV, vi4xOPQRSTUV);
const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
const uint8x8_t vi6x89ABCDEF = vld1_u8(i6); i6 += 8;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi5x89ABCDEF);
const uint8x8_t vi6xGHIJKLMN = vld1_u8(i6); i6 += 8;
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi5xGHIJKLMN);
const uint8x8_t vi6xOPQRSTUV = vld1_u8(i6); i6 += 8;
vsumOPQRSTUV = vaddw_u8(vsumOPQRSTUV, vi5xOPQRSTUV);
int32x4_t vacc0123 = vld1q_s32(buffer); buffer += 4;
int32x4_t vacc4567 = vld1q_s32(buffer); buffer += 4;
vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
int32x4_t vacc89AB = vld1q_s32(buffer); buffer += 4;
int32x4_t vaccCDEF = vld1q_s32(buffer); buffer += 4;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi6x89ABCDEF);
int32x4_t vaccGHIJ = vld1q_s32(buffer); buffer += 4;
int32x4_t vaccKLMN = vld1q_s32(buffer); buffer += 4;
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi6xGHIJKLMN);
int32x4_t vaccOPQR = vld1q_s32(buffer); buffer += 4;
int32x4_t vaccSTUV = vld1q_s32(buffer); buffer += 4;
vsumOPQRSTUV = vaddw_u8(vsumOPQRSTUV, vi6xOPQRSTUV);
vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vsum01234567)));
vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vsum01234567)));
vacc89AB = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc89AB), vget_low_u16(vsum89ABCDEF)));
vaccCDEF = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccCDEF), vget_high_u16(vsum89ABCDEF)));
vaccGHIJ = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccGHIJ), vget_low_u16(vsumGHIJKLMN)));
vaccKLMN = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccKLMN), vget_high_u16(vsumGHIJKLMN)));
vaccOPQR = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccOPQR), vget_low_u16(vsumOPQRSTUV)));
vaccSTUV = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccSTUV), vget_high_u16(vsumOPQRSTUV)));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
float32x4_t vfpacc89AB = vcvtq_f32_s32(vacc89AB);
float32x4_t vfpaccCDEF = vcvtq_f32_s32(vaccCDEF);
float32x4_t vfpaccGHIJ = vcvtq_f32_s32(vaccGHIJ);
float32x4_t vfpaccKLMN = vcvtq_f32_s32(vaccKLMN);
float32x4_t vfpaccOPQR = vcvtq_f32_s32(vaccOPQR);
float32x4_t vfpaccSTUV = vcvtq_f32_s32(vaccSTUV);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vfpacc89AB = vmulq_f32(vfpacc89AB, vscale);
vfpaccCDEF = vmulq_f32(vfpaccCDEF, vscale);
vfpaccGHIJ = vmulq_f32(vfpaccGHIJ, vscale);
vfpaccKLMN = vmulq_f32(vfpaccKLMN, vscale);
vfpaccOPQR = vmulq_f32(vfpaccOPQR, vscale);
vfpaccSTUV = vmulq_f32(vfpaccSTUV, vscale);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
vacc89AB = vcvtnq_s32_f32(vfpacc89AB);
vaccCDEF = vcvtnq_s32_f32(vfpaccCDEF);
vaccGHIJ = vcvtnq_s32_f32(vfpaccGHIJ);
vaccKLMN = vcvtnq_s32_f32(vfpaccKLMN);
vaccOPQR = vcvtnq_s32_f32(vfpaccOPQR);
vaccSTUV = vcvtnq_s32_f32(vfpaccSTUV);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
int16x8_t vaccGHIJKLMN = vqmovn_high_s32(vqmovn_s32(vaccGHIJ), vaccKLMN);
int16x8_t vaccOPQRSTUV = vqmovn_high_s32(vqmovn_s32(vaccOPQR), vaccSTUV);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
int16x8_t vaccGHIJKLMN = vcombine_s16(vqmovn_s32(vaccGHIJ), vqmovn_s32(vaccKLMN));
int16x8_t vaccOPQRSTUV = vcombine_s16(vqmovn_s32(vaccOPQR), vqmovn_s32(vaccSTUV));
#endif // !XNN_ARCH_ARM64
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
vaccGHIJKLMN = vqaddq_s16(vaccGHIJKLMN, voutput_zero_point);
vaccOPQRSTUV = vqaddq_s16(vaccOPQRSTUV, voutput_zero_point);
#if XNN_ARCH_ARM64
uint8x16_t vout0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc01234567), vacc89ABCDEF);
uint8x16_t voutGHIJKLMNOPQRSTUV = vqmovun_high_s16(vqmovun_s16(vaccGHIJKLMN), vaccOPQRSTUV);
#else // !XNN_ARCH_ARM64
uint8x16_t vout0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc01234567), vqmovun_s16(vacc89ABCDEF));
uint8x16_t voutGHIJKLMNOPQRSTUV = vcombine_u8(vqmovun_s16(vaccGHIJKLMN), vqmovun_s16(vaccOPQRSTUV));
#endif // !XNN_ARCH_ARM64
vout0123456789ABCDEF = vmaxq_u8(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMNOPQRSTUV = vmaxq_u8(voutGHIJKLMNOPQRSTUV, voutput_min);
vout0123456789ABCDEF = vminq_u8(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMNOPQRSTUV = vminq_u8(voutGHIJKLMNOPQRSTUV, voutput_max);
vst1q_u8(output, vout0123456789ABCDEF); output += 16;
vst1q_u8(output, voutGHIJKLMNOPQRSTUV); output += 16;
}
if XNN_UNLIKELY(channels != 0) {
do {
const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
int32x4_t vacc0123 = vld1q_s32(buffer); buffer += 4;
int32x4_t vacc4567 = vld1q_s32(buffer); buffer += 4;
vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vsum01234567)));
vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vsum01234567)));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
vout01234567 = vmax_u8(vout01234567, vget_low_u8(voutput_min));
vout01234567 = vmin_u8(vout01234567, vget_low_u8(voutput_max));
if XNN_LIKELY(channels >= 8) {
vst1_u8(output, vout01234567); output += 8;
channels -= 8;
} else {
if (channels & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
vout01234567 = vext_u8(vout01234567, vout01234567, 4);
}
if (channels & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
vout01234567 = vext_u8(vout01234567, vout01234567, 2);
}
if (channels & 1) {
vst1_lane_u8(output, vout01234567, 0); output += 1;
}
channels = 0;
}
} while (channels != 0);
}
}
| 24,794
| 49.192308
| 130
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7p7x-minmax-fp32-neonv8-c8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
// Multipass ("7p7x") uint8 global average pooling with NEONv8 rounding
// conversion, 8 channels per SIMD iteration.
//
// Each pass sums up to 7 input rows of `channels` uint8 values into the
// 32-bit scratch `buffer`; the final pass rescales the integer totals in
// fp32, rounds with vcvtnq_s32_f32, and clamps/narrows to uint8.
//
// rows         - number of input rows; must be > 7 (asserted below).
// channels     - number of channels per row; must be non-zero.
// input        - pointer to the first input row.
// input_stride - byte stride between consecutive input rows.
// zero         - row of zeros substituted for rows absent from the last pass.
// buffer       - int32 scratch, one accumulator per (rounded-up) channel.
// output       - destination for `channels` uint8 results.
// params       - quantization parameters (fp32_neonv8 variant).
//
// NOTE(review): XNN_OOB_READS on the signature indicates the vector loads may
// read past `channels` up to the 8-channel round-up — callers must provide
// readable padding; confirm against the XNNPACK microkernel contract.
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__neonv8_c8(
    size_t rows,
    size_t channels,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    int32_t* buffer,
    uint8_t* output,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows > 7);
  assert(channels != 0);
  // Seven row pointers, one per row consumed in a pass.
  const uint8_t* i0 = input;
  const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
  const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
  const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
  const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
  const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
  const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
  // Per-pass pointer advance: skip 7 rows, minus the channels already walked
  // (channel walk rounds up to the 8-wide vector granularity).
  const size_t input_increment = 7 * input_stride - round_up_po2(channels, 8) * sizeof(uint8_t);
  const int32x4_t vinit_bias = vld1q_dup_s32(&params->fp32_neonv8.init_bias);
  // First pass: seed `buffer` with init_bias plus the sum of rows 0..6.
  // Loads are interleaved with the widening adds (generator's schedule).
  int32_t* b = buffer;
  size_t c = channels;
  for (; c != 0; c = doz(c, 8)) {
    const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
    const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
    const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
    uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
    const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
    const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
    const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
    const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
    vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
    // Widen the u16 row sums to 32 bits and add the bias (u32/s32 reinterpret
    // is bit-exact here; sums of 7 uint8 rows cannot overflow).
    const int32x4_t vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum01234567)));
    const int32x4_t vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum01234567)));
    vst1q_s32(b, vacc0123); b += 4;
    vst1q_s32(b, vacc4567); b += 4;
  }
  // Middle passes: while more than 7 rows remain, accumulate 7 more rows
  // into the existing buffer totals.
  for (rows -= 7; rows > 7; rows -= 7) {
    i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
    i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
    i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
    i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
    i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
    i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
    i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);
    int32_t* b = buffer;
    size_t c = channels;
    for (; c != 0; c = doz(c, 8)) {
      const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
      const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
      const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
      uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
      const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
      const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
      const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
      const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
      int32x4_t vacc0123 = vld1q_s32(b);
      int32x4_t vacc4567 = vld1q_s32(b + 4);
      vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
      vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vsum01234567)));
      vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vsum01234567)));
      vst1q_s32(b, vacc0123); b += 4;
      vst1q_s32(b, vacc4567); b += 4;
    }
  }
  // Final pass setup: 1..7 rows remain; pointers for rows beyond `rows`
  // are redirected to the all-zero row so they contribute nothing.
  i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
  i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
  if XNN_UNPREDICTABLE(rows < 2) {
    i1 = zero;
  }
  i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 2) {
    i2 = zero;
  }
  i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
  if XNN_UNPREDICTABLE(rows < 4) {
    i3 = zero;
  }
  i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 4) {
    i4 = zero;
  }
  i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
  if XNN_UNPREDICTABLE(rows < 6) {
    i5 = zero;
  }
  i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 6) {
    i6 = zero;
  }
  // Requantization constants for the output pass.
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
  const uint8x8_t voutput_min = vld1_dup_u8(&params->fp32_neonv8.output_min);
  const uint8x8_t voutput_max = vld1_dup_u8(&params->fp32_neonv8.output_max);
  // Final pass main loop: add the last rows, then scale / round / clamp
  // and store 8 uint8 outputs per iteration.
  for (; channels >= 8; channels -= 8) {
    const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
    const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
    const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
    uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
    const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
    const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
    const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
    const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
    int32x4_t vacc0123 = vld1q_s32(buffer); buffer += 4;
    int32x4_t vacc4567 = vld1q_s32(buffer); buffer += 4;
    vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
    vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vsum01234567)));
    vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vsum01234567)));
    // fp32 rescale with round-to-nearest-even (NEONv8 vcvtnq).
    float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
    float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
    vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
    vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
    vacc0123 = vcvtnq_s32_f32(vfpacc0123);
    vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
    int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else  // !XNN_ARCH_ARM64
    int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif  // !XNN_ARCH_ARM64
    vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
// NOTE(review): both branches of this generated #if are identical; the
// distinction only matters in wider-vector variants of this template.
#if XNN_ARCH_ARM64
    uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
#else  // !XNN_ARCH_ARM64
    uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
#endif  // !XNN_ARCH_ARM64
    vout01234567 = vmax_u8(vout01234567, voutput_min);
    vout01234567 = vmin_u8(vout01234567, voutput_max);
    vst1_u8(output, vout01234567); output += 8;
  }
  // Remainder: 1..7 channels. Full 8-wide loads are used (see XNN_OOB_READS);
  // only the valid channels are stored via the 4/2/1-element tail stores.
  if XNN_UNLIKELY(channels != 0) {
    {
      const uint8x8_t vi0x01234567 = vld1_u8(i0);
      const uint8x8_t vi1x01234567 = vld1_u8(i1);
      const uint8x8_t vi2x01234567 = vld1_u8(i2);
      uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
      const uint8x8_t vi3x01234567 = vld1_u8(i3);
      vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
      const uint8x8_t vi4x01234567 = vld1_u8(i4);
      vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
      const uint8x8_t vi5x01234567 = vld1_u8(i5);
      vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
      const uint8x8_t vi6x01234567 = vld1_u8(i6);
      vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
      int32x4_t vacc0123 = vld1q_s32(buffer); buffer += 4;
      int32x4_t vacc4567 = vld1q_s32(buffer); buffer += 4;
      vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
      vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vsum01234567)));
      vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vsum01234567)));
      float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
      float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
      vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
      vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
      vacc0123 = vcvtnq_s32_f32(vfpacc0123);
      vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
      int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
      vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
      uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
      vout01234567 = vmax_u8(vout01234567, voutput_min);
      vout01234567 = vmin_u8(vout01234567, voutput_max);
      // Store the valid tail: 4, then 2, then 1 lane(s), shifting stored
      // lanes out with vext between stores.
      if (channels & 4) {
        vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
        vout01234567 = vext_u8(vout01234567, vout01234567, 4);
      }
      if (channels & 2) {
        vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
        vout01234567 = vext_u8(vout01234567, vout01234567, 2);
      }
      if (channels & 1) {
        vst1_lane_u8(output, vout01234567, 0);
      }
    }
  }
}
| 9,856
| 39.563786
| 128
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7p7x-minmax-fp32-scalar-fmagic-c1.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_fmagic_c1(
size_t rows,
size_t channels,
const uint8_t* input,
size_t input_stride,
const uint8_t* zero,
int32_t* buffer,
uint8_t* output,
const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(rows > 7);
assert(channels != 0);
const uint8_t* i0 = input;
const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
const size_t input_increment = 7 * input_stride - round_up_po2(channels, 1) * sizeof(uint8_t);
const int32_t vinit_bias = params->fp32_scalar_fmagic.init_bias;
int32_t* b = buffer;
size_t c = channels;
do {
int32_t vacc = vinit_bias;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vi1 = (int32_t) *i1++;
vacc += vi0;
const int32_t vi2 = (int32_t) *i2++;
vacc += vi1;
const int32_t vi3 = (int32_t) *i3++;
vacc += vi2;
const int32_t vi4 = (int32_t) *i4++;
vacc += vi3;
const int32_t vi5 = (int32_t) *i5++;
vacc += vi4;
const int32_t vi6 = (int32_t) *i6++;
vacc += vi5;
vacc += vi6;
*b++ = vacc;
} while (--c != 0);
for (rows -= 7; rows > 7; rows -= 7) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);
int32_t* b = buffer;
size_t c = channels;
do {
int32_t vacc = *b;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vi1 = (int32_t) *i1++;
vacc += vi0;
const int32_t vi2 = (int32_t) *i2++;
vacc += vi1;
const int32_t vi3 = (int32_t) *i3++;
vacc += vi2;
const int32_t vi4 = (int32_t) *i4++;
vacc += vi3;
const int32_t vi5 = (int32_t) *i5++;
vacc += vi4;
const int32_t vi6 = (int32_t) *i6++;
vacc += vi5;
vacc += vi6;
*b++ = vacc;
} while (--c != 0);
}
i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const float vscale = params->fp32_scalar_fmagic.scale;
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
do {
int32_t vacc = *buffer++;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vi1 = (int32_t) *i1++;
vacc += vi0;
const int32_t vi2 = (int32_t) *i2++;
vacc += vi1;
const int32_t vi3 = (int32_t) *i3++;
vacc += vi2;
const int32_t vi4 = (int32_t) *i4++;
vacc += vi3;
const int32_t vi5 = (int32_t) *i5++;
vacc += vi4;
const int32_t vi6 = (int32_t) *i6++;
vacc += vi5;
vacc += vi6;
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (uint8_t) vout;
} while (--channels != 0);
}
| 4,907
| 30.461538
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7p7x-minmax-fp32-scalar-fmagic-c2.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
// Multipass ("7p7x") uint8 global average pooling, scalar variant unrolled
// 2 channels per iteration. Each pass folds up to 7 input rows into the
// 32-bit scratch `buffer`; the final pass rescales in fp32 and converts to
// uint8 with the float "magic bias" trick. The loads and adds for the two
// channel lanes are interleaved by the generator to expose ILP — keep the
// statement order as-is.
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_fmagic_c2(
    size_t rows,
    size_t channels,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    int32_t* buffer,
    uint8_t* output,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(rows > 7);
  assert(channels != 0);
  // Seven row pointers, one per row consumed in a pass.
  const uint8_t* i0 = input;
  const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
  const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
  const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
  const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
  const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
  const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
  // Per-pass advance: skip 7 rows, minus the channels walked (rounded up to
  // the 2-channel unroll; an odd channel count over-reads one byte per row).
  const size_t input_increment = 7 * input_stride - round_up_po2(channels, 2) * sizeof(uint8_t);
  const int32_t vinit_bias = params->fp32_scalar_fmagic.init_bias;
  // First pass: seed the buffer with init_bias plus the sum of rows 0..6.
  int32_t* b = buffer;
  for (ptrdiff_t c = (ptrdiff_t) channels; c > 0; c -= 2) {
    const int32_t vi0x0 = (int32_t) i0[0];
    const int32_t vi0x1 = (int32_t) i0[1];
    i0 += 2;
    int32_t vacc0 = vi0x0 + vinit_bias;
    const int32_t vi1x0 = (int32_t) i1[0];
    int32_t vacc1 = vi0x1 + vinit_bias;
    const int32_t vi1x1 = (int32_t) i1[1];
    i1 += 2;
    vacc0 += vi1x0;
    const int32_t vi2x0 = (int32_t) i2[0];
    vacc1 += vi1x1;
    const int32_t vi2x1 = (int32_t) i2[1];
    i2 += 2;
    vacc0 += vi2x0;
    const int32_t vi3x0 = (int32_t) i3[0];
    vacc1 += vi2x1;
    const int32_t vi3x1 = (int32_t) i3[1];
    i3 += 2;
    vacc0 += vi3x0;
    const int32_t vi4x0 = (int32_t) i4[0];
    vacc1 += vi3x1;
    const int32_t vi4x1 = (int32_t) i4[1];
    i4 += 2;
    vacc0 += vi4x0;
    const int32_t vi5x0 = (int32_t) i5[0];
    vacc1 += vi4x1;
    const int32_t vi5x1 = (int32_t) i5[1];
    i5 += 2;
    vacc0 += vi5x0;
    const int32_t vi6x0 = (int32_t) i6[0];
    vacc1 += vi5x1;
    const int32_t vi6x1 = (int32_t) i6[1];
    i6 += 2;
    vacc0 += vi6x0;
    vacc1 += vi6x1;
    b[0] = vacc0;
    b[1] = vacc1;
    b += 2;
  }
  // Middle passes: while more than 7 rows remain, add 7 more rows into the
  // existing buffer totals.
  for (rows -= 7; rows > 7; rows -= 7) {
    i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
    i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
    i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
    i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
    i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
    i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
    i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);
    int32_t* b = buffer;
    for (ptrdiff_t c = (ptrdiff_t) channels; c > 0; c -= 2) {
      int32_t vacc0 = b[0];
      const int32_t vi0x0 = (int32_t) i0[0];
      int32_t vacc1 = b[1];
      const int32_t vi0x1 = (int32_t) i0[1];
      i0 += 2;
      vacc0 += vi0x0;
      const int32_t vi1x0 = (int32_t) i1[0];
      vacc1 += vi0x1;
      const int32_t vi1x1 = (int32_t) i1[1];
      i1 += 2;
      vacc0 += vi1x0;
      const int32_t vi2x0 = (int32_t) i2[0];
      vacc1 += vi1x1;
      const int32_t vi2x1 = (int32_t) i2[1];
      i2 += 2;
      vacc0 += vi2x0;
      const int32_t vi3x0 = (int32_t) i3[0];
      vacc1 += vi2x1;
      const int32_t vi3x1 = (int32_t) i3[1];
      i3 += 2;
      vacc0 += vi3x0;
      const int32_t vi4x0 = (int32_t) i4[0];
      vacc1 += vi3x1;
      const int32_t vi4x1 = (int32_t) i4[1];
      i4 += 2;
      vacc0 += vi4x0;
      const int32_t vi5x0 = (int32_t) i5[0];
      vacc1 += vi4x1;
      const int32_t vi5x1 = (int32_t) i5[1];
      i5 += 2;
      vacc0 += vi5x0;
      const int32_t vi6x0 = (int32_t) i6[0];
      vacc1 += vi5x1;
      const int32_t vi6x1 = (int32_t) i6[1];
      i6 += 2;
      vacc0 += vi6x0;
      vacc1 += vi6x1;
      b[0] = vacc0;
      b[1] = vacc1;
      b += 2;
    }
  }
  // Final pass setup: 1..7 rows remain; pointers for rows beyond `rows` are
  // redirected to the all-zero row so they contribute nothing.
  i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
  i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
  if XNN_UNPREDICTABLE(rows < 2) {
    i1 = zero;
  }
  i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 2) {
    i2 = zero;
  }
  i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
  if XNN_UNPREDICTABLE(rows < 4) {
    i3 = zero;
  }
  i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 4) {
    i4 = zero;
  }
  i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
  if XNN_UNPREDICTABLE(rows < 6) {
    i5 = zero;
  }
  i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 6) {
    i6 = zero;
  }
  // Requantization constants for the output pass.
  const float vscale = params->fp32_scalar_fmagic.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
  const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
  const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
  // Final pass main loop: 2 channels at a time — add the last rows, rescale
  // in fp32, clamp, and convert via the magic-bias trick.
  for (; channels >= 2; channels -= 2) {
    int32_t vacc0 = buffer[0];
    const int32_t vi0x0 = (int32_t) i0[0];
    int32_t vacc1 = buffer[1];
    const int32_t vi0x1 = (int32_t) i0[1];
    buffer += 2;
    i0 += 2;
    vacc0 += vi0x0;
    const int32_t vi1x0 = (int32_t) i1[0];
    vacc1 += vi0x1;
    const int32_t vi1x1 = (int32_t) i1[1];
    i1 += 2;
    vacc0 += vi1x0;
    const int32_t vi2x0 = (int32_t) i2[0];
    vacc1 += vi1x1;
    const int32_t vi2x1 = (int32_t) i2[1];
    i2 += 2;
    vacc0 += vi2x0;
    const int32_t vi3x0 = (int32_t) i3[0];
    vacc1 += vi2x1;
    const int32_t vi3x1 = (int32_t) i3[1];
    i3 += 2;
    vacc0 += vi3x0;
    const int32_t vi4x0 = (int32_t) i4[0];
    vacc1 += vi3x1;
    const int32_t vi4x1 = (int32_t) i4[1];
    i4 += 2;
    vacc0 += vi4x0;
    const int32_t vi5x0 = (int32_t) i5[0];
    vacc1 += vi4x1;
    const int32_t vi5x1 = (int32_t) i5[1];
    i5 += 2;
    vacc0 += vi5x0;
    const int32_t vi6x0 = (int32_t) i6[0];
    vacc1 += vi5x1;
    const int32_t vi6x1 = (int32_t) i6[1];
    i6 += 2;
    vacc0 += vi6x0;
    vacc1 += vi6x1;
    float vfpacc0 = (float) vacc0 * vscale;
    float vfpacc1 = (float) vacc1 * vscale;
    vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
    vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
    vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
    vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
    vfpacc0 += vmagic_bias;
    vfpacc1 += vmagic_bias;
    int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
    int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
    output[0] = (uint8_t) vout0;
    output[1] = (uint8_t) vout1;
    output += 2;
  }
  // Remainder: one trailing channel, processed without pointer advances.
  if XNN_UNLIKELY(channels != 0) {
    int32_t vacc = *buffer;
    const int32_t vi0 = (int32_t) *i0;
    const int32_t vi1 = (int32_t) *i1;
    vacc += vi0;
    const int32_t vi2 = (int32_t) *i2;
    vacc += vi1;
    const int32_t vi3 = (int32_t) *i3;
    vacc += vi2;
    const int32_t vi4 = (int32_t) *i4;
    vacc += vi3;
    const int32_t vi5 = (int32_t) *i5;
    vacc += vi4;
    const int32_t vi6 = (int32_t) *i6;
    vacc += vi5;
    vacc += vi6;
    float vfpacc = (float) vacc * vscale;
    vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
    vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
    vfpacc += vmagic_bias;
    int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
    *output = (uint8_t) vout;
  }
}
| 7,979
| 29.458015
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7p7x-minmax-fp32-scalar-fmagic-c4.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
// Multipass ("7p7x") uint8 global average pooling, scalar variant unrolled
// 4 channels per iteration. Each pass folds up to 7 input rows into the
// 32-bit scratch `buffer`; the final pass rescales in fp32 and converts to
// uint8 with the float "magic bias" trick. The loads and adds for the four
// channel lanes are interleaved by the generator to expose ILP — keep the
// statement order as-is.
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_fmagic_c4(
    size_t rows,
    size_t channels,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    int32_t* buffer,
    uint8_t* output,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(rows > 7);
  assert(channels != 0);
  // Seven row pointers, one per row consumed in a pass.
  const uint8_t* i0 = input;
  const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
  const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
  const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
  const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
  const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
  const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
  // Per-pass advance: skip 7 rows, minus the channels walked (rounded up to
  // the 4-channel unroll; a non-multiple-of-4 count over-reads per row).
  const size_t input_increment = 7 * input_stride - round_up_po2(channels, 4) * sizeof(uint8_t);
  const int32_t vinit_bias = params->fp32_scalar_fmagic.init_bias;
  // First pass: seed the buffer with init_bias plus the sum of rows 0..6.
  int32_t* b = buffer;
  for (ptrdiff_t c = (ptrdiff_t) channels; c > 0; c -= 4) {
    const int32_t vi0x0 = (int32_t) i0[0];
    const int32_t vi0x1 = (int32_t) i0[1];
    const int32_t vi0x2 = (int32_t) i0[2];
    const int32_t vi0x3 = (int32_t) i0[3];
    i0 += 4;
    int32_t vacc0 = vi0x0 + vinit_bias;
    const int32_t vi1x0 = (int32_t) i1[0];
    int32_t vacc1 = vi0x1 + vinit_bias;
    const int32_t vi1x1 = (int32_t) i1[1];
    int32_t vacc2 = vi0x2 + vinit_bias;
    const int32_t vi1x2 = (int32_t) i1[2];
    int32_t vacc3 = vi0x3 + vinit_bias;
    const int32_t vi1x3 = (int32_t) i1[3];
    i1 += 4;
    vacc0 += vi1x0;
    const int32_t vi2x0 = (int32_t) i2[0];
    vacc1 += vi1x1;
    const int32_t vi2x1 = (int32_t) i2[1];
    vacc2 += vi1x2;
    const int32_t vi2x2 = (int32_t) i2[2];
    vacc3 += vi1x3;
    const int32_t vi2x3 = (int32_t) i2[3];
    i2 += 4;
    vacc0 += vi2x0;
    const int32_t vi3x0 = (int32_t) i3[0];
    vacc1 += vi2x1;
    const int32_t vi3x1 = (int32_t) i3[1];
    vacc2 += vi2x2;
    const int32_t vi3x2 = (int32_t) i3[2];
    vacc3 += vi2x3;
    const int32_t vi3x3 = (int32_t) i3[3];
    i3 += 4;
    vacc0 += vi3x0;
    const int32_t vi4x0 = (int32_t) i4[0];
    vacc1 += vi3x1;
    const int32_t vi4x1 = (int32_t) i4[1];
    vacc2 += vi3x2;
    const int32_t vi4x2 = (int32_t) i4[2];
    vacc3 += vi3x3;
    const int32_t vi4x3 = (int32_t) i4[3];
    i4 += 4;
    vacc0 += vi4x0;
    const int32_t vi5x0 = (int32_t) i5[0];
    vacc1 += vi4x1;
    const int32_t vi5x1 = (int32_t) i5[1];
    vacc2 += vi4x2;
    const int32_t vi5x2 = (int32_t) i5[2];
    vacc3 += vi4x3;
    const int32_t vi5x3 = (int32_t) i5[3];
    i5 += 4;
    vacc0 += vi5x0;
    const int32_t vi6x0 = (int32_t) i6[0];
    vacc1 += vi5x1;
    const int32_t vi6x1 = (int32_t) i6[1];
    vacc2 += vi5x2;
    const int32_t vi6x2 = (int32_t) i6[2];
    vacc3 += vi5x3;
    const int32_t vi6x3 = (int32_t) i6[3];
    i6 += 4;
    vacc0 += vi6x0;
    vacc1 += vi6x1;
    vacc2 += vi6x2;
    vacc3 += vi6x3;
    b[0] = vacc0;
    b[1] = vacc1;
    b[2] = vacc2;
    b[3] = vacc3;
    b += 4;
  }
  // Middle passes: while more than 7 rows remain, add 7 more rows into the
  // existing buffer totals.
  for (rows -= 7; rows > 7; rows -= 7) {
    i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
    i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
    i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
    i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
    i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
    i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
    i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);
    int32_t* b = buffer;
    for (ptrdiff_t c = (ptrdiff_t) channels; c > 0; c -= 4) {
      int32_t vacc0 = b[0];
      const int32_t vi0x0 = (int32_t) i0[0];
      int32_t vacc1 = b[1];
      const int32_t vi0x1 = (int32_t) i0[1];
      int32_t vacc2 = b[2];
      const int32_t vi0x2 = (int32_t) i0[2];
      int32_t vacc3 = b[3];
      const int32_t vi0x3 = (int32_t) i0[3];
      i0 += 4;
      vacc0 += vi0x0;
      const int32_t vi1x0 = (int32_t) i1[0];
      vacc1 += vi0x1;
      const int32_t vi1x1 = (int32_t) i1[1];
      vacc2 += vi0x2;
      const int32_t vi1x2 = (int32_t) i1[2];
      vacc3 += vi0x3;
      const int32_t vi1x3 = (int32_t) i1[3];
      i1 += 4;
      vacc0 += vi1x0;
      const int32_t vi2x0 = (int32_t) i2[0];
      vacc1 += vi1x1;
      const int32_t vi2x1 = (int32_t) i2[1];
      vacc2 += vi1x2;
      const int32_t vi2x2 = (int32_t) i2[2];
      vacc3 += vi1x3;
      const int32_t vi2x3 = (int32_t) i2[3];
      i2 += 4;
      vacc0 += vi2x0;
      const int32_t vi3x0 = (int32_t) i3[0];
      vacc1 += vi2x1;
      const int32_t vi3x1 = (int32_t) i3[1];
      vacc2 += vi2x2;
      const int32_t vi3x2 = (int32_t) i3[2];
      vacc3 += vi2x3;
      const int32_t vi3x3 = (int32_t) i3[3];
      i3 += 4;
      vacc0 += vi3x0;
      const int32_t vi4x0 = (int32_t) i4[0];
      vacc1 += vi3x1;
      const int32_t vi4x1 = (int32_t) i4[1];
      vacc2 += vi3x2;
      const int32_t vi4x2 = (int32_t) i4[2];
      vacc3 += vi3x3;
      const int32_t vi4x3 = (int32_t) i4[3];
      i4 += 4;
      vacc0 += vi4x0;
      const int32_t vi5x0 = (int32_t) i5[0];
      vacc1 += vi4x1;
      const int32_t vi5x1 = (int32_t) i5[1];
      vacc2 += vi4x2;
      const int32_t vi5x2 = (int32_t) i5[2];
      vacc3 += vi4x3;
      const int32_t vi5x3 = (int32_t) i5[3];
      i5 += 4;
      vacc0 += vi5x0;
      const int32_t vi6x0 = (int32_t) i6[0];
      vacc1 += vi5x1;
      const int32_t vi6x1 = (int32_t) i6[1];
      vacc2 += vi5x2;
      const int32_t vi6x2 = (int32_t) i6[2];
      vacc3 += vi5x3;
      const int32_t vi6x3 = (int32_t) i6[3];
      i6 += 4;
      vacc0 += vi6x0;
      vacc1 += vi6x1;
      vacc2 += vi6x2;
      vacc3 += vi6x3;
      b[0] = vacc0;
      b[1] = vacc1;
      b[2] = vacc2;
      b[3] = vacc3;
      b += 4;
    }
  }
  // Final pass setup: 1..7 rows remain; pointers for rows beyond `rows` are
  // redirected to the all-zero row so they contribute nothing.
  i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
  i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
  if XNN_UNPREDICTABLE(rows < 2) {
    i1 = zero;
  }
  i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 2) {
    i2 = zero;
  }
  i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
  if XNN_UNPREDICTABLE(rows < 4) {
    i3 = zero;
  }
  i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 4) {
    i4 = zero;
  }
  i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
  if XNN_UNPREDICTABLE(rows < 6) {
    i5 = zero;
  }
  i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 6) {
    i6 = zero;
  }
  // Requantization constants for the output pass.
  const float vscale = params->fp32_scalar_fmagic.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
  const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
  const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
  // Final pass main loop: 4 channels at a time — add the last rows, rescale
  // in fp32, clamp, and convert via the magic-bias trick.
  for (; channels >= 4; channels -= 4) {
    int32_t vacc0 = buffer[0];
    const int32_t vi0x0 = (int32_t) i0[0];
    int32_t vacc1 = buffer[1];
    const int32_t vi0x1 = (int32_t) i0[1];
    int32_t vacc2 = buffer[2];
    const int32_t vi0x2 = (int32_t) i0[2];
    int32_t vacc3 = buffer[3];
    const int32_t vi0x3 = (int32_t) i0[3];
    buffer += 4;
    i0 += 4;
    vacc0 += vi0x0;
    const int32_t vi1x0 = (int32_t) i1[0];
    vacc1 += vi0x1;
    const int32_t vi1x1 = (int32_t) i1[1];
    vacc2 += vi0x2;
    const int32_t vi1x2 = (int32_t) i1[2];
    vacc3 += vi0x3;
    const int32_t vi1x3 = (int32_t) i1[3];
    i1 += 4;
    vacc0 += vi1x0;
    const int32_t vi2x0 = (int32_t) i2[0];
    vacc1 += vi1x1;
    const int32_t vi2x1 = (int32_t) i2[1];
    vacc2 += vi1x2;
    const int32_t vi2x2 = (int32_t) i2[2];
    vacc3 += vi1x3;
    const int32_t vi2x3 = (int32_t) i2[3];
    i2 += 4;
    vacc0 += vi2x0;
    const int32_t vi3x0 = (int32_t) i3[0];
    vacc1 += vi2x1;
    const int32_t vi3x1 = (int32_t) i3[1];
    vacc2 += vi2x2;
    const int32_t vi3x2 = (int32_t) i3[2];
    vacc3 += vi2x3;
    const int32_t vi3x3 = (int32_t) i3[3];
    i3 += 4;
    vacc0 += vi3x0;
    const int32_t vi4x0 = (int32_t) i4[0];
    vacc1 += vi3x1;
    const int32_t vi4x1 = (int32_t) i4[1];
    vacc2 += vi3x2;
    const int32_t vi4x2 = (int32_t) i4[2];
    vacc3 += vi3x3;
    const int32_t vi4x3 = (int32_t) i4[3];
    i4 += 4;
    vacc0 += vi4x0;
    const int32_t vi5x0 = (int32_t) i5[0];
    vacc1 += vi4x1;
    const int32_t vi5x1 = (int32_t) i5[1];
    vacc2 += vi4x2;
    const int32_t vi5x2 = (int32_t) i5[2];
    vacc3 += vi4x3;
    const int32_t vi5x3 = (int32_t) i5[3];
    i5 += 4;
    vacc0 += vi5x0;
    const int32_t vi6x0 = (int32_t) i6[0];
    vacc1 += vi5x1;
    const int32_t vi6x1 = (int32_t) i6[1];
    vacc2 += vi5x2;
    const int32_t vi6x2 = (int32_t) i6[2];
    vacc3 += vi5x3;
    const int32_t vi6x3 = (int32_t) i6[3];
    i6 += 4;
    vacc0 += vi6x0;
    vacc1 += vi6x1;
    vacc2 += vi6x2;
    vacc3 += vi6x3;
    float vfpacc0 = (float) vacc0 * vscale;
    float vfpacc1 = (float) vacc1 * vscale;
    float vfpacc2 = (float) vacc2 * vscale;
    float vfpacc3 = (float) vacc3 * vscale;
    vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
    vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
    vfpacc2 = math_max_f32(vfpacc2, voutput_min_less_zero_point);
    vfpacc3 = math_max_f32(vfpacc3, voutput_min_less_zero_point);
    vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
    vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
    vfpacc2 = math_min_f32(vfpacc2, voutput_max_less_zero_point);
    vfpacc3 = math_min_f32(vfpacc3, voutput_max_less_zero_point);
    vfpacc0 += vmagic_bias;
    vfpacc1 += vmagic_bias;
    vfpacc2 += vmagic_bias;
    vfpacc3 += vmagic_bias;
    int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
    int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
    int32_t vout2 = (int32_t) float_as_uint32(vfpacc2) - vmagic_bias_less_output_zero_point;
    int32_t vout3 = (int32_t) float_as_uint32(vfpacc3) - vmagic_bias_less_output_zero_point;
    output[0] = (uint8_t) vout0;
    output[1] = (uint8_t) vout1;
    output[2] = (uint8_t) vout2;
    output[3] = (uint8_t) vout3;
    output += 4;
  }
  // Remainder: 1..3 trailing channels, one at a time.
  if XNN_UNLIKELY(channels != 0) {
    do {
      int32_t vacc = *buffer++;
      const int32_t vi0 = (int32_t) *i0++;
      const int32_t vi1 = (int32_t) *i1++;
      vacc += vi0;
      const int32_t vi2 = (int32_t) *i2++;
      vacc += vi1;
      const int32_t vi3 = (int32_t) *i3++;
      vacc += vi2;
      const int32_t vi4 = (int32_t) *i4++;
      vacc += vi3;
      const int32_t vi5 = (int32_t) *i5++;
      vacc += vi4;
      const int32_t vi6 = (int32_t) *i6++;
      vacc += vi5;
      vacc += vi6;
      float vfpacc = (float) vacc * vscale;
      vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
      vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
      vfpacc += vmagic_bias;
      int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
      *output++ = (uint8_t) vout;
    } while (--channels != 0);
  }
}
| 11,675
| 30.728261
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7p7x-minmax-fp32-scalar-imagic-c1.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_imagic_c1(
    size_t rows,
    size_t channels,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    int32_t* buffer,
    uint8_t* output,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(rows > 7);
  assert(channels != 0);

  // One pointer per input row of the current 7-row group.
  const uint8_t* i0 = input;
  const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
  const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
  const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
  const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
  const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
  const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
  // Advances a row pointer from the end of one 7-row group to the start of the next.
  const size_t input_increment = 7 * input_stride - round_up_po2(channels, 1) * sizeof(uint8_t);
  const int32_t vinit_bias = params->fp32_scalar_imagic.init_bias;

  // First pass: seed each per-channel accumulator in `buffer` with
  // init_bias plus the sum of the first 7 rows.
  int32_t* b = buffer;
  size_t c = channels;
  do {
    int32_t vacc = vinit_bias;
    vacc += (int32_t) *i0++;
    vacc += (int32_t) *i1++;
    vacc += (int32_t) *i2++;
    vacc += (int32_t) *i3++;
    vacc += (int32_t) *i4++;
    vacc += (int32_t) *i5++;
    vacc += (int32_t) *i6++;
    *b++ = vacc;
  } while (--c != 0);

  // Middle passes: fold each remaining full group of 7 rows into `buffer`.
  for (rows -= 7; rows > 7; rows -= 7) {
    i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
    i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
    i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
    i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
    i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
    i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
    i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);

    int32_t* b = buffer;
    size_t c = channels;
    do {
      int32_t vacc = *b;
      vacc += (int32_t) *i0++;
      vacc += (int32_t) *i1++;
      vacc += (int32_t) *i2++;
      vacc += (int32_t) *i3++;
      vacc += (int32_t) *i4++;
      vacc += (int32_t) *i5++;
      vacc += (int32_t) *i6++;
      *b++ = vacc;
    } while (--c != 0);
  }

  // Last pass: 1..7 rows remain; any row pointer beyond the remaining count
  // is redirected to the zero vector so it contributes nothing.
  i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
  i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
  if XNN_UNPREDICTABLE(rows < 2) {
    i1 = zero;
  }
  i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 2) {
    i2 = zero;
  }
  i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
  if XNN_UNPREDICTABLE(rows < 4) {
    i3 = zero;
  }
  i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 4) {
    i4 = zero;
  }
  i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
  if XNN_UNPREDICTABLE(rows < 6) {
    i5 = zero;
  }
  i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 6) {
    i6 = zero;
  }

  // Requantization constants for the integer-magic ("imagic") variant:
  // scale, add the magic bias, reinterpret the float bits as an integer,
  // clamp in the integer domain, then subtract the bias/zero-point offset.
  const float vscale = params->fp32_scalar_imagic.scale;
  const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
  const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
  const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
  const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
  do {
    int32_t vacc = *buffer++;
    vacc += (int32_t) *i0++;
    vacc += (int32_t) *i1++;
    vacc += (int32_t) *i2++;
    vacc += (int32_t) *i3++;
    vacc += (int32_t) *i4++;
    vacc += (int32_t) *i5++;
    vacc += (int32_t) *i6++;

    float vfpacc = (float) vacc * vscale;
    vfpacc += vmagic_bias;
    int32_t vout = (int32_t) float_as_uint32(vfpacc);
    vout = math_max_s32(vout, vmagic_min);
    vout = math_min_s32(vout, vmagic_max);
    vout -= vmagic_bias_less_zero_point;
    *output++ = (uint8_t) vout;
  } while (--channels != 0);
}
| 4,791
| 29.522293
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7p7x-minmax-fp32-scalar-imagic-c2.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_imagic_c2(
    size_t rows,
    size_t channels,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    int32_t* buffer,
    uint8_t* output,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(rows > 7);
  assert(channels != 0);

  // One pointer per input row of the current 7-row group.
  const uint8_t* i0 = input;
  const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
  const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
  const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
  const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
  const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
  const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
  // Advances a row pointer from the end of one 7-row group to the start of
  // the next; channels are rounded up to the 2-wide tile.
  const size_t input_increment = 7 * input_stride - round_up_po2(channels, 2) * sizeof(uint8_t);
  const int32_t vinit_bias = params->fp32_scalar_imagic.init_bias;

  // First pass: seed `buffer` with init_bias plus the sum of the first
  // 7 rows, processing 2 channels (lanes) per tile. Loads deliberately run
  // one element past `channels` for odd counts; `buffer` is sized for the
  // rounded-up channel count.
  int32_t* b = buffer;
  for (ptrdiff_t c = (ptrdiff_t) channels; c > 0; c -= 2) {
    int32_t vacc[2];
    for (size_t k = 0; k < 2; k++) {
      vacc[k] = vinit_bias + (int32_t) i0[k];
      vacc[k] += (int32_t) i1[k];
      vacc[k] += (int32_t) i2[k];
      vacc[k] += (int32_t) i3[k];
      vacc[k] += (int32_t) i4[k];
      vacc[k] += (int32_t) i5[k];
      vacc[k] += (int32_t) i6[k];
      b[k] = vacc[k];
    }
    i0 += 2;
    i1 += 2;
    i2 += 2;
    i3 += 2;
    i4 += 2;
    i5 += 2;
    i6 += 2;
    b += 2;
  }

  // Middle passes: fold each remaining full group of 7 rows into `buffer`.
  for (rows -= 7; rows > 7; rows -= 7) {
    i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
    i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
    i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
    i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
    i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
    i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
    i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);

    int32_t* b = buffer;
    for (ptrdiff_t c = (ptrdiff_t) channels; c > 0; c -= 2) {
      int32_t vacc[2];
      for (size_t k = 0; k < 2; k++) {
        vacc[k] = b[k] + (int32_t) i0[k];
        vacc[k] += (int32_t) i1[k];
        vacc[k] += (int32_t) i2[k];
        vacc[k] += (int32_t) i3[k];
        vacc[k] += (int32_t) i4[k];
        vacc[k] += (int32_t) i5[k];
        vacc[k] += (int32_t) i6[k];
        b[k] = vacc[k];
      }
      i0 += 2;
      i1 += 2;
      i2 += 2;
      i3 += 2;
      i4 += 2;
      i5 += 2;
      i6 += 2;
      b += 2;
    }
  }

  // Last pass: 1..7 rows remain; any row pointer beyond the remaining count
  // is redirected to the zero vector so it contributes nothing.
  i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
  i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
  if XNN_UNPREDICTABLE(rows < 2) {
    i1 = zero;
  }
  i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 2) {
    i2 = zero;
  }
  i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
  if XNN_UNPREDICTABLE(rows < 4) {
    i3 = zero;
  }
  i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 4) {
    i4 = zero;
  }
  i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
  if XNN_UNPREDICTABLE(rows < 6) {
    i5 = zero;
  }
  i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 6) {
    i6 = zero;
  }

  // Requantization constants for the integer-magic ("imagic") variant:
  // scale, add the magic bias, reinterpret the float bits as an integer,
  // clamp in the integer domain, then subtract the bias/zero-point offset.
  const float vscale = params->fp32_scalar_imagic.scale;
  const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
  const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
  const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
  const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
  for (; channels >= 2; channels -= 2) {
    for (size_t k = 0; k < 2; k++) {
      int32_t vacc = buffer[k];
      vacc += (int32_t) i0[k];
      vacc += (int32_t) i1[k];
      vacc += (int32_t) i2[k];
      vacc += (int32_t) i3[k];
      vacc += (int32_t) i4[k];
      vacc += (int32_t) i5[k];
      vacc += (int32_t) i6[k];

      float vfpacc = (float) vacc * vscale;
      vfpacc += vmagic_bias;
      int32_t vout = (int32_t) float_as_uint32(vfpacc);
      vout = math_max_s32(vout, vmagic_min);
      vout = math_min_s32(vout, vmagic_max);
      vout -= vmagic_bias_less_zero_point;
      output[k] = (uint8_t) vout;
    }
    buffer += 2;
    i0 += 2;
    i1 += 2;
    i2 += 2;
    i3 += 2;
    i4 += 2;
    i5 += 2;
    i6 += 2;
    output += 2;
  }
  if XNN_UNLIKELY(channels != 0) {
    // Single trailing channel: no pointer advance needed afterwards.
    int32_t vacc = *buffer;
    vacc += (int32_t) *i0;
    vacc += (int32_t) *i1;
    vacc += (int32_t) *i2;
    vacc += (int32_t) *i3;
    vacc += (int32_t) *i4;
    vacc += (int32_t) *i5;
    vacc += (int32_t) *i6;

    float vfpacc = (float) vacc * vscale;
    vfpacc += vmagic_bias;
    int32_t vout = (int32_t) float_as_uint32(vfpacc);
    vout = math_max_s32(vout, vmagic_min);
    vout = math_min_s32(vout, vmagic_max);
    vout -= vmagic_bias_less_zero_point;
    *output = (uint8_t) vout;
  }
}
| 7,790
| 28.289474
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7p7x-minmax-fp32-scalar-imagic-c4.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_imagic_c4(
    size_t rows,
    size_t channels,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    int32_t* buffer,
    uint8_t* output,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(rows > 7);
  assert(channels != 0);

  // One pointer per input row of the current 7-row group.
  const uint8_t* i0 = input;
  const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
  const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
  const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
  const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
  const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
  const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
  // Advances a row pointer from the end of one 7-row group to the start of
  // the next; channels are rounded up to the 4-wide tile.
  const size_t input_increment = 7 * input_stride - round_up_po2(channels, 4) * sizeof(uint8_t);
  const int32_t vinit_bias = params->fp32_scalar_imagic.init_bias;

  // First pass: seed `buffer` with init_bias plus the sum of the first
  // 7 rows, processing 4 channels (lanes) per tile. Loads deliberately run
  // past `channels` for counts that are not multiples of 4; `buffer` is
  // sized for the rounded-up channel count.
  int32_t* b = buffer;
  for (ptrdiff_t c = (ptrdiff_t) channels; c > 0; c -= 4) {
    int32_t vacc[4];
    for (size_t k = 0; k < 4; k++) {
      vacc[k] = vinit_bias + (int32_t) i0[k];
      vacc[k] += (int32_t) i1[k];
      vacc[k] += (int32_t) i2[k];
      vacc[k] += (int32_t) i3[k];
      vacc[k] += (int32_t) i4[k];
      vacc[k] += (int32_t) i5[k];
      vacc[k] += (int32_t) i6[k];
      b[k] = vacc[k];
    }
    i0 += 4;
    i1 += 4;
    i2 += 4;
    i3 += 4;
    i4 += 4;
    i5 += 4;
    i6 += 4;
    b += 4;
  }

  // Middle passes: fold each remaining full group of 7 rows into `buffer`.
  for (rows -= 7; rows > 7; rows -= 7) {
    i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
    i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
    i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
    i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
    i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
    i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
    i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);

    int32_t* b = buffer;
    for (ptrdiff_t c = (ptrdiff_t) channels; c > 0; c -= 4) {
      int32_t vacc[4];
      for (size_t k = 0; k < 4; k++) {
        vacc[k] = b[k] + (int32_t) i0[k];
        vacc[k] += (int32_t) i1[k];
        vacc[k] += (int32_t) i2[k];
        vacc[k] += (int32_t) i3[k];
        vacc[k] += (int32_t) i4[k];
        vacc[k] += (int32_t) i5[k];
        vacc[k] += (int32_t) i6[k];
        b[k] = vacc[k];
      }
      i0 += 4;
      i1 += 4;
      i2 += 4;
      i3 += 4;
      i4 += 4;
      i5 += 4;
      i6 += 4;
      b += 4;
    }
  }

  // Last pass: 1..7 rows remain; any row pointer beyond the remaining count
  // is redirected to the zero vector so it contributes nothing.
  i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
  i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
  if XNN_UNPREDICTABLE(rows < 2) {
    i1 = zero;
  }
  i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 2) {
    i2 = zero;
  }
  i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
  if XNN_UNPREDICTABLE(rows < 4) {
    i3 = zero;
  }
  i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 4) {
    i4 = zero;
  }
  i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
  if XNN_UNPREDICTABLE(rows < 6) {
    i5 = zero;
  }
  i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 6) {
    i6 = zero;
  }

  // Requantization constants for the integer-magic ("imagic") variant:
  // scale, add the magic bias, reinterpret the float bits as an integer,
  // clamp in the integer domain, then subtract the bias/zero-point offset.
  const float vscale = params->fp32_scalar_imagic.scale;
  const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
  const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
  const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
  const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
  for (; channels >= 4; channels -= 4) {
    for (size_t k = 0; k < 4; k++) {
      int32_t vacc = buffer[k];
      vacc += (int32_t) i0[k];
      vacc += (int32_t) i1[k];
      vacc += (int32_t) i2[k];
      vacc += (int32_t) i3[k];
      vacc += (int32_t) i4[k];
      vacc += (int32_t) i5[k];
      vacc += (int32_t) i6[k];

      float vfpacc = (float) vacc * vscale;
      vfpacc += vmagic_bias;
      int32_t vout = (int32_t) float_as_uint32(vfpacc);
      vout = math_max_s32(vout, vmagic_min);
      vout = math_min_s32(vout, vmagic_max);
      vout -= vmagic_bias_less_zero_point;
      output[k] = (uint8_t) vout;
    }
    buffer += 4;
    i0 += 4;
    i1 += 4;
    i2 += 4;
    i3 += 4;
    i4 += 4;
    i5 += 4;
    i6 += 4;
    output += 4;
  }
  if XNN_UNLIKELY(channels != 0) {
    // 1..3 trailing channels, handled one at a time.
    do {
      int32_t vacc = *buffer++;
      vacc += (int32_t) *i0++;
      vacc += (int32_t) *i1++;
      vacc += (int32_t) *i2++;
      vacc += (int32_t) *i3++;
      vacc += (int32_t) *i4++;
      vacc += (int32_t) *i5++;
      vacc += (int32_t) *i6++;

      float vfpacc = (float) vacc * vscale;
      vfpacc += vmagic_bias;
      int32_t vout = (int32_t) float_as_uint32(vfpacc);
      vout = math_max_s32(vout, vmagic_min);
      vout = math_min_s32(vout, vmagic_max);
      vout -= vmagic_bias_less_zero_point;
      *output++ = (uint8_t) vout;
    } while (--channels != 0);
  }
}
| 11,414
| 29.52139
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7p7x-minmax-fp32-scalar-lrintf-c1.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_lrintf_c1(
    size_t rows,
    size_t channels,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    int32_t* buffer,
    uint8_t* output,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(rows > 7);
  assert(channels != 0);

  // One pointer per input row of the current 7-row group.
  const uint8_t* i0 = input;
  const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
  const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
  const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
  const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
  const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
  const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
  // Advances a row pointer from the end of one 7-row group to the start of the next.
  const size_t input_increment = 7 * input_stride - round_up_po2(channels, 1) * sizeof(uint8_t);
  const int32_t vinit_bias = params->fp32_scalar_lrintf.init_bias;

  // First pass: seed each per-channel accumulator in `buffer` with
  // init_bias plus the sum of the first 7 rows.
  int32_t* b = buffer;
  size_t c = channels;
  do {
    int32_t vacc = vinit_bias;
    vacc += (int32_t) *i0++;
    vacc += (int32_t) *i1++;
    vacc += (int32_t) *i2++;
    vacc += (int32_t) *i3++;
    vacc += (int32_t) *i4++;
    vacc += (int32_t) *i5++;
    vacc += (int32_t) *i6++;
    *b++ = vacc;
  } while (--c != 0);

  // Middle passes: fold each remaining full group of 7 rows into `buffer`.
  for (rows -= 7; rows > 7; rows -= 7) {
    i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
    i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
    i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
    i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
    i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
    i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
    i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);

    int32_t* b = buffer;
    size_t c = channels;
    do {
      int32_t vacc = *b;
      vacc += (int32_t) *i0++;
      vacc += (int32_t) *i1++;
      vacc += (int32_t) *i2++;
      vacc += (int32_t) *i3++;
      vacc += (int32_t) *i4++;
      vacc += (int32_t) *i5++;
      vacc += (int32_t) *i6++;
      *b++ = vacc;
    } while (--c != 0);
  }

  // Last pass: 1..7 rows remain; any row pointer beyond the remaining count
  // is redirected to the zero vector so it contributes nothing.
  i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
  i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
  if XNN_UNPREDICTABLE(rows < 2) {
    i1 = zero;
  }
  i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 2) {
    i2 = zero;
  }
  i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
  if XNN_UNPREDICTABLE(rows < 4) {
    i3 = zero;
  }
  i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 4) {
    i4 = zero;
  }
  i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
  if XNN_UNPREDICTABLE(rows < 6) {
    i5 = zero;
  }
  i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 6) {
    i6 = zero;
  }

  // Requantization constants for the lrintf variant: scale, clamp in the
  // float domain, round with lrintf, then re-add the output zero point.
  const float vscale = params->fp32_scalar_lrintf.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
  const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
  do {
    int32_t vacc = *buffer++;
    vacc += (int32_t) *i0++;
    vacc += (int32_t) *i1++;
    vacc += (int32_t) *i2++;
    vacc += (int32_t) *i3++;
    vacc += (int32_t) *i4++;
    vacc += (int32_t) *i5++;
    vacc += (int32_t) *i6++;

    float vfpacc = (float) vacc * vscale;
    vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
    vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
    const int32_t vrndacc = (int32_t) lrintf(vfpacc);
    const int32_t vout = vrndacc + voutput_zero_point;
    *output++ = (uint8_t) vout;
  } while (--channels != 0);
}
| 4,811
| 29.846154
| 98
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7p7x-minmax-fp32-scalar-lrintf-c2.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_lrintf_c2(
size_t rows,
size_t channels,
const uint8_t* input,
size_t input_stride,
const uint8_t* zero,
int32_t* buffer,
uint8_t* output,
const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(rows > 7);
assert(channels != 0);
const uint8_t* i0 = input;
const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
const size_t input_increment = 7 * input_stride - round_up_po2(channels, 2) * sizeof(uint8_t);
const int32_t vinit_bias = params->fp32_scalar_lrintf.init_bias;
int32_t* b = buffer;
for (ptrdiff_t c = (ptrdiff_t) channels; c > 0; c -= 2) {
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
int32_t vacc0 = vi0x0 + vinit_bias;
const int32_t vi1x0 = (int32_t) i1[0];
int32_t vacc1 = vi0x1 + vinit_bias;
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
vacc0 += vi1x0;
const int32_t vi2x0 = (int32_t) i2[0];
vacc1 += vi1x1;
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
vacc0 += vi2x0;
const int32_t vi3x0 = (int32_t) i3[0];
vacc1 += vi2x1;
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
vacc0 += vi3x0;
const int32_t vi4x0 = (int32_t) i4[0];
vacc1 += vi3x1;
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
vacc0 += vi4x0;
const int32_t vi5x0 = (int32_t) i5[0];
vacc1 += vi4x1;
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
vacc0 += vi5x0;
const int32_t vi6x0 = (int32_t) i6[0];
vacc1 += vi5x1;
const int32_t vi6x1 = (int32_t) i6[1];
i6 += 2;
vacc0 += vi6x0;
vacc1 += vi6x1;
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
for (rows -= 7; rows > 7; rows -= 7) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);
int32_t* b = buffer;
for (ptrdiff_t c = (ptrdiff_t) channels; c > 0; c -= 2) {
int32_t vacc0 = b[0];
const int32_t vi0x0 = (int32_t) i0[0];
int32_t vacc1 = b[1];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
vacc0 += vi0x0;
const int32_t vi1x0 = (int32_t) i1[0];
vacc1 += vi0x1;
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
vacc0 += vi1x0;
const int32_t vi2x0 = (int32_t) i2[0];
vacc1 += vi1x1;
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
vacc0 += vi2x0;
const int32_t vi3x0 = (int32_t) i3[0];
vacc1 += vi2x1;
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
vacc0 += vi3x0;
const int32_t vi4x0 = (int32_t) i4[0];
vacc1 += vi3x1;
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
vacc0 += vi4x0;
const int32_t vi5x0 = (int32_t) i5[0];
vacc1 += vi4x1;
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
vacc0 += vi5x0;
const int32_t vi6x0 = (int32_t) i6[0];
vacc1 += vi5x1;
const int32_t vi6x1 = (int32_t) i6[1];
i6 += 2;
vacc0 += vi6x0;
vacc1 += vi6x1;
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
}
i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const float vscale = params->fp32_scalar_lrintf.scale;
const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
for (; channels >= 2; channels -= 2) {
int32_t vacc0 = buffer[0];
const int32_t vi0x0 = (int32_t) i0[0];
int32_t vacc1 = buffer[1];
const int32_t vi0x1 = (int32_t) i0[1];
buffer += 2;
i0 += 2;
vacc0 += vi0x0;
const int32_t vi1x0 = (int32_t) i1[0];
vacc1 += vi0x1;
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
vacc0 += vi1x0;
const int32_t vi2x0 = (int32_t) i2[0];
vacc1 += vi1x1;
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
vacc0 += vi2x0;
const int32_t vi3x0 = (int32_t) i3[0];
vacc1 += vi2x1;
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
vacc0 += vi3x0;
const int32_t vi4x0 = (int32_t) i4[0];
vacc1 += vi3x1;
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
vacc0 += vi4x0;
const int32_t vi5x0 = (int32_t) i5[0];
vacc1 += vi4x1;
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
vacc0 += vi5x0;
const int32_t vi6x0 = (int32_t) i6[0];
vacc1 += vi5x1;
const int32_t vi6x1 = (int32_t) i6[1];
i6 += 2;
vacc0 += vi6x0;
vacc1 += vi6x1;
float vfpacc0 = (float) vacc0 * vscale;
float vfpacc1 = (float) vacc1 * vscale;
vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
const int32_t vrndacc0 = (int32_t) lrintf(vfpacc0);
const int32_t vrndacc1 = (int32_t) lrintf(vfpacc1);
int32_t vout0 = vrndacc0 + voutput_zero_point;
int32_t vout1 = vrndacc1 + voutput_zero_point;
output[0] = (uint8_t) vout0;
output[1] = (uint8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(channels != 0) {
int32_t vacc = *buffer;
const int32_t vi0 = (int32_t) *i0;
const int32_t vi1 = (int32_t) *i1;
vacc += vi0;
const int32_t vi2 = (int32_t) *i2;
vacc += vi1;
const int32_t vi3 = (int32_t) *i3;
vacc += vi2;
const int32_t vi4 = (int32_t) *i4;
vacc += vi3;
const int32_t vi5 = (int32_t) *i5;
vacc += vi4;
const int32_t vi6 = (int32_t) *i6;
vacc += vi5;
vacc += vi6;
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
const int32_t vrndacc = (int32_t) lrintf(vfpacc);
int32_t vout = vrndacc + voutput_zero_point;
*output = (uint8_t) vout;
}
}
| 7,855
| 28.984733
| 98
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7p7x-minmax-fp32-scalar-lrintf-c4.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_lrintf_c4(
    size_t rows,
    size_t channels,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    int32_t* buffer,
    uint8_t* output,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(rows > 7);
  assert(channels != 0);

  // Multipass global average pooling: rows are consumed 7 at a time, with
  // per-channel partial sums kept as int32 in the caller-provided buffer.
  const uint8_t* in[7];
  in[0] = input;
  for (size_t r = 1; r < 7; r++) {
    in[r] = (const uint8_t*) ((uintptr_t) in[r - 1] + input_stride);
  }
  // After a 7-row pass each pointer sits round_up_po2(channels, 4) bytes into
  // its row; this increment moves it to the start of the next 7-row group.
  const size_t input_increment = 7 * input_stride - round_up_po2(channels, 4) * sizeof(uint8_t);

  // First pass: sum of rows 0..6 plus the init bias, stored to the buffer.
  // Channels are processed in groups of 4; the final group may read up to
  // 3 bytes past `channels` within a row, as in the generated kernel.
  const int32_t vinit_bias = params->fp32_scalar_lrintf.init_bias;
  int32_t* b = buffer;
  for (ptrdiff_t c = (ptrdiff_t) channels; c > 0; c -= 4) {
    for (size_t k = 0; k < 4; k++) {
      int32_t vacc = vinit_bias;
      for (size_t r = 0; r < 7; r++) {
        vacc += (int32_t) in[r][k];
      }
      b[k] = vacc;
    }
    for (size_t r = 0; r < 7; r++) {
      in[r] += 4;
    }
    b += 4;
  }

  // Intermediate passes: accumulate 7 more rows into the buffered sums.
  for (rows -= 7; rows > 7; rows -= 7) {
    for (size_t r = 0; r < 7; r++) {
      in[r] = (const uint8_t*) ((uintptr_t) in[r] + input_increment);
    }
    int32_t* acc = buffer;
    for (ptrdiff_t c = (ptrdiff_t) channels; c > 0; c -= 4) {
      for (size_t k = 0; k < 4; k++) {
        int32_t vacc = acc[k];
        for (size_t r = 0; r < 7; r++) {
          vacc += (int32_t) in[r][k];
        }
        acc[k] = vacc;
      }
      for (size_t r = 0; r < 7; r++) {
        in[r] += 4;
      }
      acc += 4;
    }
  }

  // Last pass: between 1 and 7 rows remain; point the unused row pointers at
  // the zero vector so they contribute nothing to the sums.
  for (size_t r = 0; r < 7; r++) {
    in[r] = (const uint8_t*) ((uintptr_t) in[r] + input_increment);
  }
  if XNN_UNPREDICTABLE(rows < 2) {
    in[1] = zero;
  }
  if XNN_UNPREDICTABLE(rows <= 2) {
    in[2] = zero;
  }
  if XNN_UNPREDICTABLE(rows < 4) {
    in[3] = zero;
  }
  if XNN_UNPREDICTABLE(rows <= 4) {
    in[4] = zero;
  }
  if XNN_UNPREDICTABLE(rows < 6) {
    in[5] = zero;
  }
  if XNN_UNPREDICTABLE(rows <= 6) {
    in[6] = zero;
  }

  const float vscale = params->fp32_scalar_lrintf.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
  const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;

  // Requantize groups of 4 channels: total sum -> scale -> clamp -> round.
  for (; channels >= 4; channels -= 4) {
    for (size_t k = 0; k < 4; k++) {
      int32_t vacc = buffer[k];
      for (size_t r = 0; r < 7; r++) {
        vacc += (int32_t) in[r][k];
      }
      float vfpacc = (float) vacc * vscale;
      vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
      vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
      const int32_t vrndacc = (int32_t) lrintf(vfpacc);
      output[k] = (uint8_t) (vrndacc + voutput_zero_point);
    }
    buffer += 4;
    for (size_t r = 0; r < 7; r++) {
      in[r] += 4;
    }
    output += 4;
  }

  // Remaining 1-3 channels, one at a time.
  if XNN_UNLIKELY(channels != 0) {
    do {
      int32_t vacc = *buffer++;
      for (size_t r = 0; r < 7; r++) {
        vacc += (int32_t) *in[r]++;
      }
      float vfpacc = (float) vacc * vscale;
      vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
      vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
      const int32_t vrndacc = (int32_t) lrintf(vfpacc);
      *output++ = (uint8_t) (vrndacc + voutput_zero_point);
    } while (--channels != 0);
  }
}
| 11,523
| 30.315217
| 98
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7p7x-minmax-fp32-sse2-c16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-sse2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__sse2_c16(
    size_t rows,
    size_t channels,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    int32_t* buffer,
    uint8_t* output,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows > 7);
  assert(channels != 0);

  // Multipass global average pooling over groups of 16 channels. Rows are
  // consumed 7 at a time; 7 * 255 fits in int16, so per-pass sums are held
  // as 16-bit lanes and widened to int32 only when flushed to the buffer.
  const uint8_t* in[7];
  in[0] = input;
  for (size_t r = 1; r < 7; r++) {
    in[r] = (const uint8_t*) ((uintptr_t) in[r - 1] + input_stride);
  }
  // Each pass walks round_up_po2(channels, 16) bytes of a row; this increment
  // rewinds that and skips to the next 7-row group.
  const size_t input_increment = 7 * input_stride - round_up_po2(channels, 16) * sizeof(uint8_t);

  const __m128i vinit_bias = _mm_load_si128((const __m128i*) params->fp32_sse2.init_bias);
  const __m128i vzero = _mm_setzero_si128();

  // First pass: write (sum of rows 0..6) + init_bias into the int32 buffer.
  int32_t* b = buffer;
  for (size_t c = channels; c != 0; c = doz(c, 16)) {
    __m128i vacc01234567 = vzero;
    __m128i vacc89ABCDEF = vzero;
    for (size_t r = 0; r < 7; r++) {
      const __m128i vi_lo = _mm_loadl_epi64((const __m128i*) in[r]);
      const __m128i vi_hi = _mm_loadl_epi64((const __m128i*) (in[r] + 8));
      in[r] += 16;
      vacc01234567 = _mm_add_epi16(vacc01234567, _mm_unpacklo_epi8(vi_lo, vzero));
      vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, _mm_unpacklo_epi8(vi_hi, vzero));
    }
    _mm_store_si128((__m128i*) b, _mm_add_epi32(_mm_unpacklo_epi16(vacc01234567, vzero), vinit_bias));
    _mm_store_si128((__m128i*) (b + 4), _mm_add_epi32(_mm_unpackhi_epi16(vacc01234567, vzero), vinit_bias));
    _mm_store_si128((__m128i*) (b + 8), _mm_add_epi32(_mm_unpacklo_epi16(vacc89ABCDEF, vzero), vinit_bias));
    _mm_store_si128((__m128i*) (b + 12), _mm_add_epi32(_mm_unpackhi_epi16(vacc89ABCDEF, vzero), vinit_bias));
    b += 16;
  }

  // Intermediate passes: add 7 more rows into the buffered partial sums.
  for (rows -= 7; rows > 7; rows -= 7) {
    for (size_t r = 0; r < 7; r++) {
      in[r] = (const uint8_t*) ((uintptr_t) in[r] + input_increment);
    }
    int32_t* acc = buffer;
    for (size_t c = channels; c != 0; c = doz(c, 16)) {
      __m128i vacc01234567 = vzero;
      __m128i vacc89ABCDEF = vzero;
      for (size_t r = 0; r < 7; r++) {
        const __m128i vi_lo = _mm_loadl_epi64((const __m128i*) in[r]);
        const __m128i vi_hi = _mm_loadl_epi64((const __m128i*) (in[r] + 8));
        in[r] += 16;
        vacc01234567 = _mm_add_epi16(vacc01234567, _mm_unpacklo_epi8(vi_lo, vzero));
        vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, _mm_unpacklo_epi8(vi_hi, vzero));
      }
      _mm_store_si128((__m128i*) acc,
          _mm_add_epi32(_mm_unpacklo_epi16(vacc01234567, vzero), _mm_load_si128((const __m128i*) acc)));
      _mm_store_si128((__m128i*) (acc + 4),
          _mm_add_epi32(_mm_unpackhi_epi16(vacc01234567, vzero), _mm_load_si128((const __m128i*) (acc + 4))));
      _mm_store_si128((__m128i*) (acc + 8),
          _mm_add_epi32(_mm_unpacklo_epi16(vacc89ABCDEF, vzero), _mm_load_si128((const __m128i*) (acc + 8))));
      _mm_store_si128((__m128i*) (acc + 12),
          _mm_add_epi32(_mm_unpackhi_epi16(vacc89ABCDEF, vzero), _mm_load_si128((const __m128i*) (acc + 12))));
      acc += 16;
    }
  }

  // Last pass: 1-7 rows remain; unused row pointers are redirected to the
  // zero vector so they add nothing to the sums.
  for (size_t r = 0; r < 7; r++) {
    in[r] = (const uint8_t*) ((uintptr_t) in[r] + input_increment);
  }
  if XNN_UNPREDICTABLE(rows < 2) {
    in[1] = zero;
  }
  if XNN_UNPREDICTABLE(rows <= 2) {
    in[2] = zero;
  }
  if XNN_UNPREDICTABLE(rows < 4) {
    in[3] = zero;
  }
  if XNN_UNPREDICTABLE(rows <= 4) {
    in[4] = zero;
  }
  if XNN_UNPREDICTABLE(rows < 6) {
    in[5] = zero;
  }
  if XNN_UNPREDICTABLE(rows <= 6) {
    in[6] = zero;
  }

  const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
  const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
  const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
  const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);

  // Requantize full groups of 16 channels:
  // sum -> float -> scale -> clamp-above -> round -> pack (adds zero point
  // with saturation) -> clamp-below.
  for (; channels >= 16; channels -= 16) {
    __m128i vacc01234567 = vzero;
    __m128i vacc89ABCDEF = vzero;
    for (size_t r = 0; r < 7; r++) {
      const __m128i vi_lo = _mm_loadl_epi64((const __m128i*) in[r]);
      const __m128i vi_hi = _mm_loadl_epi64((const __m128i*) (in[r] + 8));
      in[r] += 16;
      vacc01234567 = _mm_add_epi16(vacc01234567, _mm_unpacklo_epi8(vi_lo, vzero));
      vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, _mm_unpacklo_epi8(vi_hi, vzero));
    }
    __m128i vacc0123 = _mm_add_epi32(_mm_unpacklo_epi16(vacc01234567, vzero), _mm_load_si128((const __m128i*) buffer));
    __m128i vacc4567 = _mm_add_epi32(_mm_unpackhi_epi16(vacc01234567, vzero), _mm_load_si128((const __m128i*) (buffer + 4)));
    __m128i vacc89AB = _mm_add_epi32(_mm_unpacklo_epi16(vacc89ABCDEF, vzero), _mm_load_si128((const __m128i*) (buffer + 8)));
    __m128i vaccCDEF = _mm_add_epi32(_mm_unpackhi_epi16(vacc89ABCDEF, vzero), _mm_load_si128((const __m128i*) (buffer + 12)));
    buffer += 16;

    const __m128 vfpacc0123 = _mm_min_ps(_mm_mul_ps(_mm_cvtepi32_ps(vacc0123), vscale), voutput_max_less_zero_point);
    const __m128 vfpacc4567 = _mm_min_ps(_mm_mul_ps(_mm_cvtepi32_ps(vacc4567), vscale), voutput_max_less_zero_point);
    const __m128 vfpacc89AB = _mm_min_ps(_mm_mul_ps(_mm_cvtepi32_ps(vacc89AB), vscale), voutput_max_less_zero_point);
    const __m128 vfpaccCDEF = _mm_min_ps(_mm_mul_ps(_mm_cvtepi32_ps(vaccCDEF), vscale), voutput_max_less_zero_point);
    vacc0123 = _mm_cvtps_epi32(vfpacc0123);
    vacc4567 = _mm_cvtps_epi32(vfpacc4567);
    vacc89AB = _mm_cvtps_epi32(vfpacc89AB);
    vaccCDEF = _mm_cvtps_epi32(vfpaccCDEF);

    const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
    const __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
    __m128i vout0123456789ABCDEF = _mm_packus_epi16(vout01234567, vout89ABCDEF);
    vout0123456789ABCDEF = _mm_max_epu8(vout0123456789ABCDEF, voutput_min);

    _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
    output += 16;
  }

  // Remaining 1-15 channels, processed 8 at a time with a scalarized tail
  // store for the final partial group.
  if XNN_UNLIKELY(channels != 0) {
    do {
      __m128i vacc01234567 = vzero;
      for (size_t r = 0; r < 7; r++) {
        const __m128i vi = _mm_loadl_epi64((const __m128i*) in[r]);
        in[r] += 8;
        vacc01234567 = _mm_add_epi16(vacc01234567, _mm_unpacklo_epi8(vi, vzero));
      }
      __m128i vacc0123 = _mm_add_epi32(_mm_unpacklo_epi16(vacc01234567, vzero), _mm_load_si128((const __m128i*) buffer));
      __m128i vacc4567 = _mm_add_epi32(_mm_unpackhi_epi16(vacc01234567, vzero), _mm_load_si128((const __m128i*) (buffer + 4)));
      buffer += 8;

      const __m128 vfpacc0123 = _mm_min_ps(_mm_mul_ps(_mm_cvtepi32_ps(vacc0123), vscale), voutput_max_less_zero_point);
      const __m128 vfpacc4567 = _mm_min_ps(_mm_mul_ps(_mm_cvtepi32_ps(vacc4567), vscale), voutput_max_less_zero_point);
      vacc0123 = _mm_cvtps_epi32(vfpacc0123);
      vacc4567 = _mm_cvtps_epi32(vfpacc4567);

      const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
      __m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
      vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);

      if XNN_LIKELY(channels >= 8) {
        _mm_storel_epi64((__m128i*) output, vout0123456701234567);
        output += 8;
        channels -= 8;
      } else {
        if (channels & 4) {
          unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
          vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
          output += 4;
        }
        uint32_t vout0123 = (uint32_t) _mm_cvtsi128_si32(vout0123456701234567);
        if (channels & 2) {
          unaligned_store_u16(output, (uint16_t) vout0123);
          vout0123 >>= 16;
          output += 2;
        }
        if (channels & 1) {
          *output = (uint8_t) vout0123;
          output += 1;
        }
        channels = 0;
      }
    } while (channels != 0);
  }
}
| 19,094
| 43.823944
| 106
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7p7x-minmax-fp32-sse2-c8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-sse2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// Multipass global average pooling microkernel: QU8 input/output, FP32-based
// requantization, SSE2 ISA, 8 channels per vector iteration.
// The reduction over `rows` (must be > 7, per the assert) proceeds 7 rows at a
// time: the first pass seeds the int32 scratch `buffer` with row sums plus
// init_bias, middle passes accumulate further 7-row groups into `buffer`, and
// the final pass (at most 7 remaining rows) requantizes the totals into the
// uint8 `output` row.
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__sse2_c8(
    size_t rows,
    size_t channels,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    int32_t* buffer,
    uint8_t* output,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows > 7);
  assert(channels != 0);
  // One pointer per input row of the current 7-row pass.
  const uint8_t* i0 = input;
  const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
  const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
  const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
  const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
  const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
  const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
  // Step to the next 7-row group, compensating for the bytes the channel loop
  // already consumed (channels rounded up to the 8-lane tile width).
  const size_t input_increment = 7 * input_stride - round_up_po2(channels, 8) * sizeof(uint8_t);
  const __m128i vinit_bias = _mm_load_si128((const __m128i*) params->fp32_sse2.init_bias);
  const __m128i vzero = _mm_setzero_si128();
  int32_t* b = buffer;
  size_t c = channels;
  // First pass: sum rows 0-6 per channel and seed the buffer with sum + init_bias.
  for (; c != 0; c = doz(c, 8)) {
    const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
    i0 += 8;
    // _mm_unpacklo_epi8 with vzero zero-extends the 8 uint8 lanes to uint16.
    const __m128i vxi0x01234567 = _mm_unpacklo_epi8(vi0x01234567, vzero);
    const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
    i1 += 8;
    const __m128i vxi1x01234567 = _mm_unpacklo_epi8(vi1x01234567, vzero);
    const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
    i2 += 8;
    // Loads, widening unpacks, and adds are interleaved to hide latency.
    __m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
    const __m128i vxi2x01234567 = _mm_unpacklo_epi8(vi2x01234567, vzero);
    const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
    i3 += 8;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
    const __m128i vxi3x01234567 = _mm_unpacklo_epi8(vi3x01234567, vzero);
    const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
    i4 += 8;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
    const __m128i vxi4x01234567 = _mm_unpacklo_epi8(vi4x01234567, vzero);
    const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
    i5 += 8;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
    const __m128i vxi5x01234567 = _mm_unpacklo_epi8(vi5x01234567, vzero);
    const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
    i6 += 8;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
    const __m128i vxi6x01234567 = _mm_unpacklo_epi8(vi6x01234567, vzero);
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
    // Widen the 16-bit 7-row sums to 32 bits (7 * 255 = 1785 fits in uint16,
    // so the 16-bit accumulation above cannot overflow).
    __m128i vacc0123 = _mm_unpacklo_epi16(vacc01234567, vzero);
    __m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, vzero);
    vacc0123 = _mm_add_epi32(vacc0123, vinit_bias);
    vacc4567 = _mm_add_epi32(vacc4567, vinit_bias);
    _mm_store_si128((__m128i*) b, vacc0123);
    _mm_store_si128((__m128i*) (b + 4), vacc4567);
    b += 8;
  }
  // Middle passes: while more than 7 rows remain, add each further 7-row
  // group into the running per-channel sums already stored in the buffer.
  for (rows -= 7; rows > 7; rows -= 7) {
    i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
    i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
    i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
    i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
    i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
    i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
    i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);
    // Fresh cursors over the buffer/channels for this pass (shadow the outer ones).
    int32_t* b = buffer;
    size_t c = channels;
    for (; c != 0; c = doz(c, 8)) {
      const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
      i0 += 8;
      const __m128i vxi0x01234567 = _mm_unpacklo_epi8(vi0x01234567, vzero);
      const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
      i1 += 8;
      const __m128i vxi1x01234567 = _mm_unpacklo_epi8(vi1x01234567, vzero);
      const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
      i2 += 8;
      __m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
      const __m128i vxi2x01234567 = _mm_unpacklo_epi8(vi2x01234567, vzero);
      const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
      i3 += 8;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
      const __m128i vxi3x01234567 = _mm_unpacklo_epi8(vi3x01234567, vzero);
      const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
      i4 += 8;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
      const __m128i vxi4x01234567 = _mm_unpacklo_epi8(vi4x01234567, vzero);
      const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
      i5 += 8;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
      const __m128i vxi5x01234567 = _mm_unpacklo_epi8(vi5x01234567, vzero);
      const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
      i6 += 8;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
      const __m128i vxi6x01234567 = _mm_unpacklo_epi8(vi6x01234567, vzero);
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
      __m128i vacc0123 = _mm_unpacklo_epi16(vacc01234567, vzero);
      __m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, vzero);
      // Accumulate onto the partial sums from previous passes.
      vacc0123 = _mm_add_epi32(vacc0123, _mm_load_si128((const __m128i*) b));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_load_si128((const __m128i*) (b + 4)));
      _mm_store_si128((__m128i*) b, vacc0123);
      _mm_store_si128((__m128i*) (b + 4), vacc4567);
      b += 8;
    }
  }
  // Final pass setup: 1-7 rows remain. Row pointers past the remaining count
  // are redirected to the caller-provided `zero` row (assumed zero-filled —
  // standard convention for this parameter) so they do not perturb the sums.
  i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
  i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
  if XNN_UNPREDICTABLE(rows < 2) {
    i1 = zero;
  }
  i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 2) {
    i2 = zero;
  }
  i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
  if XNN_UNPREDICTABLE(rows < 4) {
    i3 = zero;
  }
  i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 4) {
    i4 = zero;
  }
  i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
  if XNN_UNPREDICTABLE(rows < 6) {
    i5 = zero;
  }
  i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 6) {
    i6 = zero;
  }
  // Requantization parameters for the output pass.
  const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
  const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
  const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
  const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
  // Final pass, main loop: full groups of 8 channels.
  for (; channels >= 8; channels -= 8) {
    const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
    i0 += 8;
    const __m128i vxi0x01234567 = _mm_unpacklo_epi8(vi0x01234567, vzero);
    const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
    i1 += 8;
    const __m128i vxi1x01234567 = _mm_unpacklo_epi8(vi1x01234567, vzero);
    const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
    i2 += 8;
    __m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
    const __m128i vxi2x01234567 = _mm_unpacklo_epi8(vi2x01234567, vzero);
    const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
    i3 += 8;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
    const __m128i vxi3x01234567 = _mm_unpacklo_epi8(vi3x01234567, vzero);
    const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
    i4 += 8;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
    const __m128i vxi4x01234567 = _mm_unpacklo_epi8(vi4x01234567, vzero);
    const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
    i5 += 8;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
    const __m128i vxi5x01234567 = _mm_unpacklo_epi8(vi5x01234567, vzero);
    const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
    i6 += 8;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
    const __m128i vxi6x01234567 = _mm_unpacklo_epi8(vi6x01234567, vzero);
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
    __m128i vacc0123 = _mm_unpacklo_epi16(vacc01234567, vzero);
    __m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, vzero);
    // Add this group's sum to the totals accumulated in the buffer.
    vacc0123 = _mm_add_epi32(vacc0123, _mm_load_si128((const __m128i*) buffer));
    vacc4567 = _mm_add_epi32(vacc4567, _mm_load_si128((const __m128i*) (buffer + 4)));
    buffer += 8;
    // Requantize: scale in FP32, clamp from above, convert back to int32
    // (_mm_cvtps_epi32 rounds to nearest), then pack to int16 with a
    // saturating zero-point add, pack to uint8, and clamp from below.
    __m128 vfpacc0123 = _mm_cvtepi32_ps(vacc0123);
    __m128 vfpacc4567 = _mm_cvtepi32_ps(vacc4567);
    vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
    vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
    vfpacc0123 = _mm_min_ps(vfpacc0123, voutput_max_less_zero_point);
    vfpacc4567 = _mm_min_ps(vfpacc4567, voutput_max_less_zero_point);
    vacc0123 = _mm_cvtps_epi32(vfpacc0123);
    vacc4567 = _mm_cvtps_epi32(vfpacc4567);
    __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
    __m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
    vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
    _mm_storel_epi64((__m128i*) output, vout0123456701234567);
    output += 8;
  }
  // Remainder: 1-7 trailing channels. A full 8-lane group is computed (input
  // reads may run past `channels`; the XNN_OOB_READS annotation on this
  // function permits that), but only the valid 4/2/1-byte pieces are stored.
  if XNN_UNLIKELY(channels != 0) {
    {
      const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
      i0 += 8;
      const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
      i1 += 8;
      const __m128i vxi0x01234567 = _mm_unpacklo_epi8(vi0x01234567, vzero);
      const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
      i2 += 8;
      const __m128i vxi1x01234567 = _mm_unpacklo_epi8(vi1x01234567, vzero);
      const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
      i3 += 8;
      __m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
      const __m128i vxi2x01234567 = _mm_unpacklo_epi8(vi2x01234567, vzero);
      const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
      i4 += 8;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
      const __m128i vxi3x01234567 = _mm_unpacklo_epi8(vi3x01234567, vzero);
      const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
      i5 += 8;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
      const __m128i vxi4x01234567 = _mm_unpacklo_epi8(vi4x01234567, vzero);
      const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
      i6 += 8;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
      const __m128i vxi5x01234567 = _mm_unpacklo_epi8(vi5x01234567, vzero);
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
      const __m128i vxi6x01234567 = _mm_unpacklo_epi8(vi6x01234567, vzero);
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
      __m128i vacc0123 = _mm_unpacklo_epi16(vacc01234567, vzero);
      __m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, vzero);
      vacc0123 = _mm_add_epi32(vacc0123, _mm_load_si128((const __m128i*) buffer));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_load_si128((const __m128i*) (buffer + 4)));
      buffer += 8;
      __m128 vfpacc0123 = _mm_cvtepi32_ps(vacc0123);
      __m128 vfpacc4567 = _mm_cvtepi32_ps(vacc4567);
      vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
      vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
      vfpacc0123 = _mm_min_ps(vfpacc0123, voutput_max_less_zero_point);
      vfpacc4567 = _mm_min_ps(vfpacc4567, voutput_max_less_zero_point);
      vacc0123 = _mm_cvtps_epi32(vfpacc0123);
      vacc4567 = _mm_cvtps_epi32(vfpacc4567);
      __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
      __m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
      vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
      // Store 4, then 2, then 1 byte as dictated by the low bits of `channels`,
      // shifting the consumed lanes out of the vector/scalar each time.
      if (channels & 4) {
        unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
        vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
        output += 4;
      }
      uint32_t vout0123 = (uint32_t) _mm_cvtsi128_si32(vout0123456701234567);
      if (channels & 2) {
        unaligned_store_u16(output, (uint16_t) vout0123);
        vout0123 >>= 16;
        output += 2;
      }
      if (channels & 1) {
        *output = (uint8_t) vout0123;
      }
    }
  }
}
| 12,847
| 37.582583
| 106
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7p7x-minmax-fp32-sse41-c16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-sse4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// Multipass global average pooling microkernel: QU8 input/output, FP32-based
// requantization, SSE4.1 ISA, 16 channels per vector iteration.
// The reduction over `rows` (must be > 7, per the assert) proceeds 7 rows at a
// time: the first pass seeds the int32 scratch `buffer` with row sums plus
// init_bias, middle passes accumulate further 7-row groups into `buffer`, and
// the final pass (at most 7 remaining rows) requantizes the totals into the
// uint8 `output` row.
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__sse41_c16(
    size_t rows,
    size_t channels,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    int32_t* buffer,
    uint8_t* output,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows > 7);
  assert(channels != 0);
  // One pointer per input row of the current 7-row pass.
  const uint8_t* i0 = input;
  const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
  const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
  const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
  const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
  const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
  const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
  // Step to the next 7-row group, compensating for the bytes the channel loop
  // already consumed (channels rounded up to the 16-lane tile width).
  const size_t input_increment = 7 * input_stride - round_up_po2(channels, 16) * sizeof(uint8_t);
  const __m128i vinit_bias = _mm_load_si128((const __m128i*) params->fp32_sse4.init_bias);
  int32_t* b = buffer;
  size_t c = channels;
  // First pass: sum rows 0-6 per channel and seed the buffer with sum + init_bias.
  // Each iteration handles 16 channels as two 8-lane halves (01234567 / 89ABCDEF);
  // _mm_cvtepu8_epi16 zero-extends the uint8 loads to uint16.
  for (; c != 0; c = doz(c, 16)) {
    const __m128i vxi0x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i0));
    const __m128i vxi0x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i0 + 8)));
    i0 += 16;
    const __m128i vxi1x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i1));
    const __m128i vxi1x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i1 + 8)));
    i1 += 16;
    // Loads and adds are interleaved across the two halves to hide latency.
    __m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
    const __m128i vxi2x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i2));
    __m128i vacc89ABCDEF = _mm_add_epi16(vxi0x89ABCDEF, vxi1x89ABCDEF);
    const __m128i vxi2x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i2 + 8)));
    i2 += 16;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
    const __m128i vxi3x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i3));
    vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi2x89ABCDEF);
    const __m128i vxi3x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i3 + 8)));
    i3 += 16;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
    const __m128i vxi4x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i4));
    vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi3x89ABCDEF);
    const __m128i vxi4x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i4 + 8)));
    i4 += 16;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
    const __m128i vxi5x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i5));
    vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi4x89ABCDEF);
    const __m128i vxi5x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i5 + 8)));
    i5 += 16;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
    const __m128i vxi6x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i6));
    vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi5x89ABCDEF);
    const __m128i vxi6x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i6 + 8)));
    i6 += 16;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
    vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi6x89ABCDEF);
    // Widen the 16-bit 7-row sums to 32 bits (7 * 255 = 1785 fits in uint16,
    // so the 16-bit accumulation above cannot overflow).
    const __m128i vzero = _mm_setzero_si128();
    __m128i vacc0123 = _mm_cvtepu16_epi32(vacc01234567);
    __m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, vzero);
    __m128i vacc89AB = _mm_cvtepu16_epi32(vacc89ABCDEF);
    __m128i vaccCDEF = _mm_unpackhi_epi16(vacc89ABCDEF, vzero);
    vacc0123 = _mm_add_epi32(vacc0123, vinit_bias);
    vacc4567 = _mm_add_epi32(vacc4567, vinit_bias);
    vacc89AB = _mm_add_epi32(vacc89AB, vinit_bias);
    vaccCDEF = _mm_add_epi32(vaccCDEF, vinit_bias);
    _mm_store_si128((__m128i*) b, vacc0123);
    _mm_store_si128((__m128i*) (b + 4), vacc4567);
    _mm_store_si128((__m128i*) (b + 8), vacc89AB);
    _mm_store_si128((__m128i*) (b + 12), vaccCDEF);
    b += 16;
  }
  // Middle passes: while more than 7 rows remain, add each further 7-row
  // group into the running per-channel sums already stored in the buffer.
  for (rows -= 7; rows > 7; rows -= 7) {
    i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
    i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
    i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
    i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
    i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
    i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
    i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);
    // Fresh cursors over the buffer/channels for this pass (shadow the outer ones).
    int32_t* b = buffer;
    size_t c = channels;
    for (; c != 0; c = doz(c, 16)) {
      const __m128i vxi0x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i0));
      const __m128i vxi0x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i0 + 8)));
      i0 += 16;
      const __m128i vxi1x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i1));
      const __m128i vxi1x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i1 + 8)));
      i1 += 16;
      __m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
      const __m128i vxi2x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i2));
      __m128i vacc89ABCDEF = _mm_add_epi16(vxi0x89ABCDEF, vxi1x89ABCDEF);
      const __m128i vxi2x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i2 + 8)));
      i2 += 16;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
      const __m128i vxi3x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i3));
      vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi2x89ABCDEF);
      const __m128i vxi3x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i3 + 8)));
      i3 += 16;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
      const __m128i vxi4x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i4));
      vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi3x89ABCDEF);
      const __m128i vxi4x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i4 + 8)));
      i4 += 16;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
      const __m128i vxi5x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i5));
      vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi4x89ABCDEF);
      const __m128i vxi5x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i5 + 8)));
      i5 += 16;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
      const __m128i vxi6x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i6));
      vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi5x89ABCDEF);
      const __m128i vxi6x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i6 + 8)));
      i6 += 16;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
      vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi6x89ABCDEF);
      const __m128i vzero = _mm_setzero_si128();
      __m128i vacc0123 = _mm_cvtepu16_epi32(vacc01234567);
      __m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, vzero);
      __m128i vacc89AB = _mm_cvtepu16_epi32(vacc89ABCDEF);
      __m128i vaccCDEF = _mm_unpackhi_epi16(vacc89ABCDEF, vzero);
      // Accumulate onto the partial sums from previous passes.
      vacc0123 = _mm_add_epi32(vacc0123, _mm_load_si128((const __m128i*) b));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_load_si128((const __m128i*) (b + 4)));
      vacc89AB = _mm_add_epi32(vacc89AB, _mm_load_si128((const __m128i*) (b + 8)));
      vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_load_si128((const __m128i*) (b + 12)));
      _mm_store_si128((__m128i*) b, vacc0123);
      _mm_store_si128((__m128i*) (b + 4), vacc4567);
      _mm_store_si128((__m128i*) (b + 8), vacc89AB);
      _mm_store_si128((__m128i*) (b + 12), vaccCDEF);
      b += 16;
    }
  }
  // Final pass setup: 1-7 rows remain. Row pointers past the remaining count
  // are redirected to the caller-provided `zero` row (assumed zero-filled —
  // standard convention for this parameter) so they do not perturb the sums.
  i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
  i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
  if XNN_UNPREDICTABLE(rows < 2) {
    i1 = zero;
  }
  i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 2) {
    i2 = zero;
  }
  i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
  if XNN_UNPREDICTABLE(rows < 4) {
    i3 = zero;
  }
  i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 4) {
    i4 = zero;
  }
  i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
  if XNN_UNPREDICTABLE(rows < 6) {
    i5 = zero;
  }
  i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 6) {
    i6 = zero;
  }
  // Requantization parameters for the output pass.
  const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
  const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
  const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
  const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse4.output_min);
  // Final pass, main loop: full groups of 16 channels.
  for (; channels >= 16; channels -= 16) {
    const __m128i vxi0x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i0));
    const __m128i vxi0x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i0 + 8)));
    i0 += 16;
    const __m128i vxi1x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i1));
    const __m128i vxi1x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i1 + 8)));
    i1 += 16;
    __m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
    const __m128i vxi2x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i2));
    __m128i vacc89ABCDEF = _mm_add_epi16(vxi0x89ABCDEF, vxi1x89ABCDEF);
    const __m128i vxi2x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i2 + 8)));
    i2 += 16;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
    const __m128i vxi3x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i3));
    vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi2x89ABCDEF);
    const __m128i vxi3x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i3 + 8)));
    i3 += 16;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
    const __m128i vxi4x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i4));
    vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi3x89ABCDEF);
    const __m128i vxi4x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i4 + 8)));
    i4 += 16;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
    const __m128i vxi5x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i5));
    vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi4x89ABCDEF);
    const __m128i vxi5x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i5 + 8)));
    i5 += 16;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
    const __m128i vxi6x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i6));
    vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi5x89ABCDEF);
    const __m128i vxi6x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i6 + 8)));
    i6 += 16;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
    vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi6x89ABCDEF);
    const __m128i vzero = _mm_setzero_si128();
    __m128i vacc0123 = _mm_cvtepu16_epi32(vacc01234567);
    __m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, vzero);
    __m128i vacc89AB = _mm_cvtepu16_epi32(vacc89ABCDEF);
    __m128i vaccCDEF = _mm_unpackhi_epi16(vacc89ABCDEF, vzero);
    // Add this group's sum to the totals accumulated in the buffer.
    vacc0123 = _mm_add_epi32(vacc0123, _mm_load_si128((const __m128i*) buffer));
    vacc4567 = _mm_add_epi32(vacc4567, _mm_load_si128((const __m128i*) (buffer + 4)));
    vacc89AB = _mm_add_epi32(vacc89AB, _mm_load_si128((const __m128i*) (buffer + 8)));
    vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_load_si128((const __m128i*) (buffer + 12)));
    buffer += 16;
    // Requantize: scale in FP32, clamp from above, convert back to int32
    // (_mm_cvtps_epi32 rounds to nearest), then pack to int16 with a
    // saturating zero-point add, pack to uint8, and clamp from below.
    __m128 vfpacc0123 = _mm_cvtepi32_ps(vacc0123);
    __m128 vfpacc4567 = _mm_cvtepi32_ps(vacc4567);
    __m128 vfpacc89AB = _mm_cvtepi32_ps(vacc89AB);
    __m128 vfpaccCDEF = _mm_cvtepi32_ps(vaccCDEF);
    vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
    vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
    vfpacc89AB = _mm_mul_ps(vfpacc89AB, vscale);
    vfpaccCDEF = _mm_mul_ps(vfpaccCDEF, vscale);
    vfpacc0123 = _mm_min_ps(vfpacc0123, voutput_max_less_zero_point);
    vfpacc4567 = _mm_min_ps(vfpacc4567, voutput_max_less_zero_point);
    vfpacc89AB = _mm_min_ps(vfpacc89AB, voutput_max_less_zero_point);
    vfpaccCDEF = _mm_min_ps(vfpaccCDEF, voutput_max_less_zero_point);
    vacc0123 = _mm_cvtps_epi32(vfpacc0123);
    vacc4567 = _mm_cvtps_epi32(vfpacc4567);
    vacc89AB = _mm_cvtps_epi32(vfpacc89AB);
    vaccCDEF = _mm_cvtps_epi32(vfpaccCDEF);
    __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
    __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
    __m128i vout0123456789ABCDEF = _mm_packus_epi16(vout01234567, vout89ABCDEF);
    vout0123456789ABCDEF = _mm_max_epu8(vout0123456789ABCDEF, voutput_min);
    _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
    output += 16;
  }
  // Remainder: 1-15 trailing channels, processed 8 at a time. Input reads may
  // run past `channels` (the XNN_OOB_READS annotation on this function permits
  // that); the sub-8 tail stores only the valid 4/2/1-byte pieces.
  if XNN_UNLIKELY(channels != 0) {
    do {
      const __m128i vxi0x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i0));
      i0 += 8;
      const __m128i vxi1x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i1));
      i1 += 8;
      __m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
      const __m128i vxi2x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i2));
      i2 += 8;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
      const __m128i vxi3x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i3));
      i3 += 8;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
      const __m128i vxi4x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i4));
      i4 += 8;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
      const __m128i vxi5x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i5));
      i5 += 8;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
      const __m128i vxi6x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i6));
      i6 += 8;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
      __m128i vacc0123 = _mm_cvtepu16_epi32(vacc01234567);
      __m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, _mm_setzero_si128());
      vacc0123 = _mm_add_epi32(vacc0123, _mm_load_si128((const __m128i*) buffer));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_load_si128((const __m128i*) (buffer + 4)));
      buffer += 8;
      __m128 vfpacc0123 = _mm_cvtepi32_ps(vacc0123);
      __m128 vfpacc4567 = _mm_cvtepi32_ps(vacc4567);
      vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
      vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
      vfpacc0123 = _mm_min_ps(vfpacc0123, voutput_max_less_zero_point);
      vfpacc4567 = _mm_min_ps(vfpacc4567, voutput_max_less_zero_point);
      vacc0123 = _mm_cvtps_epi32(vfpacc0123);
      vacc4567 = _mm_cvtps_epi32(vfpacc4567);
      __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
      __m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
      vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
      if XNN_LIKELY(channels >= 8) {
        // Full 8-channel group within the remainder: store it whole and loop.
        _mm_storel_epi64((__m128i*) output, vout0123456701234567);
        output += 8;
        channels -= 8;
      } else {
        // Sub-8 tail: store 4, then 2, then 1 byte as dictated by the low
        // bits of `channels`, shifting consumed lanes out of the vector.
        if (channels & 4) {
          unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
          vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
          output += 4;
        }
        if (channels & 2) {
          unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
          vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
          output += 2;
        }
        if (channels & 1) {
          *output = (uint8_t) _mm_extract_epi8(vout0123456701234567, 0);
          output += 1;
        }
        channels = 0;
      }
    } while (channels != 0);
  }
}
| 16,479
| 45.818182
| 106
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7p7x-minmax-fp32-sse41-c24.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-sse4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__sse41_c24(
size_t rows,
size_t channels,
const uint8_t* input,
size_t input_stride,
const uint8_t* zero,
int32_t* buffer,
uint8_t* output,
const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows > 7);
assert(channels != 0);
const uint8_t* i0 = input;
const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
const size_t input_increment = 7 * input_stride - round_up_po2(channels, 8) * sizeof(uint8_t);
const __m128i vinit_bias = _mm_load_si128((const __m128i*) params->fp32_sse4.init_bias);
int32_t* b = buffer;
size_t c = channels;
for (; c >= 24; c -= 24) {
const __m128i vxi0x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i0));
const __m128i vxi0x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i0 + 8)));
const __m128i vxi0xGHIJKLMN = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i0 + 16)));
i0 += 24;
const __m128i vxi1x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i1));
const __m128i vxi1x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i1 + 8)));
const __m128i vxi1xGHIJKLMN = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i1 + 16)));
i1 += 24;
__m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
const __m128i vxi2x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i2));
__m128i vacc89ABCDEF = _mm_add_epi16(vxi0x89ABCDEF, vxi1x89ABCDEF);
const __m128i vxi2x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i2 + 8)));
__m128i vaccGHIJKLMN = _mm_add_epi16(vxi0xGHIJKLMN, vxi1xGHIJKLMN);
const __m128i vxi2xGHIJKLMN = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i2 + 16)));
i2 += 24;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
const __m128i vxi3x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i3));
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi2x89ABCDEF);
const __m128i vxi3x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i3 + 8)));
vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi2xGHIJKLMN);
const __m128i vxi3xGHIJKLMN = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i3 + 16)));
i3 += 24;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
const __m128i vxi4x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i4));
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi3x89ABCDEF);
const __m128i vxi4x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i4 + 8)));
vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi3xGHIJKLMN);
const __m128i vxi4xGHIJKLMN = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i4 + 16)));
i4 += 24;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
const __m128i vxi5x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i5));
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi4x89ABCDEF);
const __m128i vxi5x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i5 + 8)));
vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi4xGHIJKLMN);
const __m128i vxi5xGHIJKLMN = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i5 + 16)));
i5 += 24;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
const __m128i vxi6x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i6));
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi5x89ABCDEF);
const __m128i vxi6x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i6 + 8)));
vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi5xGHIJKLMN);
const __m128i vxi6xGHIJKLMN = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i6 + 16)));
i6 += 24;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi6x89ABCDEF);
vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi6xGHIJKLMN);
const __m128i vzero = _mm_setzero_si128();
__m128i vacc0123 = _mm_cvtepu16_epi32(vacc01234567);
__m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, vzero);
__m128i vacc89AB = _mm_cvtepu16_epi32(vacc89ABCDEF);
__m128i vaccCDEF = _mm_unpackhi_epi16(vacc89ABCDEF, vzero);
__m128i vaccGHIJ = _mm_cvtepu16_epi32(vaccGHIJKLMN);
__m128i vaccKLMN = _mm_unpackhi_epi16(vaccGHIJKLMN, vzero);
vacc0123 = _mm_add_epi32(vacc0123, vinit_bias);
vacc4567 = _mm_add_epi32(vacc4567, vinit_bias);
vacc89AB = _mm_add_epi32(vacc89AB, vinit_bias);
vaccCDEF = _mm_add_epi32(vaccCDEF, vinit_bias);
vaccGHIJ = _mm_add_epi32(vaccGHIJ, vinit_bias);
vaccKLMN = _mm_add_epi32(vaccKLMN, vinit_bias);
_mm_store_si128((__m128i*) b, vacc0123);
_mm_store_si128((__m128i*) (b + 4), vacc4567);
_mm_store_si128((__m128i*) (b + 8), vacc89AB);
_mm_store_si128((__m128i*) (b + 12), vaccCDEF);
_mm_store_si128((__m128i*) (b + 16), vaccGHIJ);
_mm_store_si128((__m128i*) (b + 20), vaccKLMN);
b += 24;
}
if XNN_UNLIKELY(c != 0) {
do {
const __m128i vxi0x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i0));
i0 += 8;
const __m128i vxi1x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i1));
i1 += 8;
__m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
const __m128i vxi2x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i2));
i2 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
const __m128i vxi3x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i3));
i3 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
const __m128i vxi4x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i4));
i4 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
const __m128i vxi5x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i5));
i5 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
const __m128i vxi6x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i6));
i6 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
__m128i vacc0123 = _mm_cvtepu16_epi32(vacc01234567);
__m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, _mm_setzero_si128());
vacc0123 = _mm_add_epi32(vacc0123, vinit_bias);
vacc4567 = _mm_add_epi32(vacc4567, vinit_bias);
_mm_store_si128((__m128i*) b, vacc0123);
_mm_store_si128((__m128i*) (b + 4), vacc4567);
b += 8;
c = doz(c, 8);
} while (c != 0);
}
for (rows -= 7; rows > 7; rows -= 7) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);
int32_t* b = buffer;
size_t c = channels;
for (; c >= 24; c -= 24) {
const __m128i vxi0x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i0));
const __m128i vxi0x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i0 + 8)));
const __m128i vxi0xGHIJKLMN = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i0 + 16)));
i0 += 24;
const __m128i vxi1x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i1));
const __m128i vxi1x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i1 + 8)));
const __m128i vxi1xGHIJKLMN = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i1 + 16)));
i1 += 24;
__m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
const __m128i vxi2x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i2));
__m128i vacc89ABCDEF = _mm_add_epi16(vxi0x89ABCDEF, vxi1x89ABCDEF);
const __m128i vxi2x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i2 + 8)));
__m128i vaccGHIJKLMN = _mm_add_epi16(vxi0xGHIJKLMN, vxi1xGHIJKLMN);
const __m128i vxi2xGHIJKLMN = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i2 + 16)));
i2 += 24;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
const __m128i vxi3x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i3));
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi2x89ABCDEF);
const __m128i vxi3x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i3 + 8)));
vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi2xGHIJKLMN);
const __m128i vxi3xGHIJKLMN = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i3 + 16)));
i3 += 24;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
const __m128i vxi4x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i4));
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi3x89ABCDEF);
const __m128i vxi4x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i4 + 8)));
vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi3xGHIJKLMN);
const __m128i vxi4xGHIJKLMN = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i4 + 16)));
i4 += 24;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
const __m128i vxi5x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i5));
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi4x89ABCDEF);
const __m128i vxi5x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i5 + 8)));
vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi4xGHIJKLMN);
const __m128i vxi5xGHIJKLMN = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i5 + 16)));
i5 += 24;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
const __m128i vxi6x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i6));
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi5x89ABCDEF);
const __m128i vxi6x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i6 + 8)));
vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi5xGHIJKLMN);
const __m128i vxi6xGHIJKLMN = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i6 + 16)));
i6 += 24;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi6x89ABCDEF);
vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi6xGHIJKLMN);
const __m128i vzero = _mm_setzero_si128();
__m128i vacc0123 = _mm_cvtepu16_epi32(vacc01234567);
__m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, vzero);
__m128i vacc89AB = _mm_cvtepu16_epi32(vacc89ABCDEF);
__m128i vaccCDEF = _mm_unpackhi_epi16(vacc89ABCDEF, vzero);
__m128i vaccGHIJ = _mm_cvtepu16_epi32(vaccGHIJKLMN);
__m128i vaccKLMN = _mm_unpackhi_epi16(vaccGHIJKLMN, vzero);
vacc0123 = _mm_add_epi32(vacc0123, _mm_load_si128((const __m128i*) b));
vacc4567 = _mm_add_epi32(vacc4567, _mm_load_si128((const __m128i*) (b + 4)));
vacc89AB = _mm_add_epi32(vacc89AB, _mm_load_si128((const __m128i*) (b + 8)));
vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_load_si128((const __m128i*) (b + 12)));
vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_load_si128((const __m128i*) (b + 16)));
vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_load_si128((const __m128i*) (b + 20)));
_mm_store_si128((__m128i*) b, vacc0123);
_mm_store_si128((__m128i*) (b + 4), vacc4567);
_mm_store_si128((__m128i*) (b + 8), vacc89AB);
_mm_store_si128((__m128i*) (b + 12), vaccCDEF);
_mm_store_si128((__m128i*) (b + 16), vaccGHIJ);
_mm_store_si128((__m128i*) (b + 20), vaccKLMN);
b += 24;
}
if XNN_UNLIKELY(c != 0) {
do {
const __m128i vxi0x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i0));
i0 += 8;
const __m128i vxi1x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i1));
i1 += 8;
__m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
const __m128i vxi2x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i2));
i2 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
const __m128i vxi3x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i3));
i3 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
const __m128i vxi4x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i4));
i4 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
const __m128i vxi5x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i5));
i5 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
const __m128i vxi6x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i6));
i6 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
__m128i vacc0123 = _mm_cvtepu16_epi32(vacc01234567);
__m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, _mm_setzero_si128());
vacc0123 = _mm_add_epi32(vacc0123, _mm_load_si128((const __m128i*) b));
vacc4567 = _mm_add_epi32(vacc4567, _mm_load_si128((const __m128i*) (b + 4)));
_mm_store_si128((__m128i*) b, vacc0123);
_mm_store_si128((__m128i*) (b + 4), vacc4567);
b += 8;
c = doz(c, 8);
} while (c != 0);
}
}
i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse4.output_min);
for (; channels >= 24; channels -= 24) {
const __m128i vxi0x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i0));
const __m128i vxi0x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i0 + 8)));
const __m128i vxi0xGHIJKLMN = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i0 + 16)));
i0 += 24;
const __m128i vxi1x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i1));
const __m128i vxi1x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i1 + 8)));
const __m128i vxi1xGHIJKLMN = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i1 + 16)));
i1 += 24;
__m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
const __m128i vxi2x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i2));
__m128i vacc89ABCDEF = _mm_add_epi16(vxi0x89ABCDEF, vxi1x89ABCDEF);
const __m128i vxi2x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i2 + 8)));
__m128i vaccGHIJKLMN = _mm_add_epi16(vxi0xGHIJKLMN, vxi1xGHIJKLMN);
const __m128i vxi2xGHIJKLMN = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i2 + 16)));
i2 += 24;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
const __m128i vxi3x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i3));
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi2x89ABCDEF);
const __m128i vxi3x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i3 + 8)));
vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi2xGHIJKLMN);
const __m128i vxi3xGHIJKLMN = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i3 + 16)));
i3 += 24;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
const __m128i vxi4x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i4));
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi3x89ABCDEF);
const __m128i vxi4x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i4 + 8)));
vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi3xGHIJKLMN);
const __m128i vxi4xGHIJKLMN = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i4 + 16)));
i4 += 24;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
const __m128i vxi5x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i5));
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi4x89ABCDEF);
const __m128i vxi5x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i5 + 8)));
vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi4xGHIJKLMN);
const __m128i vxi5xGHIJKLMN = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i5 + 16)));
i5 += 24;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
const __m128i vxi6x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i6));
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi5x89ABCDEF);
const __m128i vxi6x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i6 + 8)));
vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi5xGHIJKLMN);
const __m128i vxi6xGHIJKLMN = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i6 + 16)));
i6 += 24;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi6x89ABCDEF);
vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi6xGHIJKLMN);
const __m128i vzero = _mm_setzero_si128();
__m128i vacc0123 = _mm_cvtepu16_epi32(vacc01234567);
__m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, vzero);
__m128i vacc89AB = _mm_cvtepu16_epi32(vacc89ABCDEF);
__m128i vaccCDEF = _mm_unpackhi_epi16(vacc89ABCDEF, vzero);
__m128i vaccGHIJ = _mm_cvtepu16_epi32(vaccGHIJKLMN);
__m128i vaccKLMN = _mm_unpackhi_epi16(vaccGHIJKLMN, vzero);
vacc0123 = _mm_add_epi32(vacc0123, _mm_load_si128((const __m128i*) buffer));
vacc4567 = _mm_add_epi32(vacc4567, _mm_load_si128((const __m128i*) (buffer + 4)));
vacc89AB = _mm_add_epi32(vacc89AB, _mm_load_si128((const __m128i*) (buffer + 8)));
vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_load_si128((const __m128i*) (buffer + 12)));
vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_load_si128((const __m128i*) (buffer + 16)));
vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_load_si128((const __m128i*) (buffer + 20)));
buffer += 24;
__m128 vfpacc0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vacc4567);
__m128 vfpacc89AB = _mm_cvtepi32_ps(vacc89AB);
__m128 vfpaccCDEF = _mm_cvtepi32_ps(vaccCDEF);
__m128 vfpaccGHIJ = _mm_cvtepi32_ps(vaccGHIJ);
__m128 vfpaccKLMN = _mm_cvtepi32_ps(vaccKLMN);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
vfpacc89AB = _mm_mul_ps(vfpacc89AB, vscale);
vfpaccCDEF = _mm_mul_ps(vfpaccCDEF, vscale);
vfpaccGHIJ = _mm_mul_ps(vfpaccGHIJ, vscale);
vfpaccKLMN = _mm_mul_ps(vfpaccKLMN, vscale);
vfpacc0123 = _mm_min_ps(vfpacc0123, voutput_max_less_zero_point);
vfpacc4567 = _mm_min_ps(vfpacc4567, voutput_max_less_zero_point);
vfpacc89AB = _mm_min_ps(vfpacc89AB, voutput_max_less_zero_point);
vfpaccCDEF = _mm_min_ps(vfpaccCDEF, voutput_max_less_zero_point);
vfpaccGHIJ = _mm_min_ps(vfpaccGHIJ, voutput_max_less_zero_point);
vfpaccKLMN = _mm_min_ps(vfpaccKLMN, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vfpacc0123);
vacc4567 = _mm_cvtps_epi32(vfpacc4567);
vacc89AB = _mm_cvtps_epi32(vfpacc89AB);
vaccCDEF = _mm_cvtps_epi32(vfpaccCDEF);
vaccGHIJ = _mm_cvtps_epi32(vfpaccGHIJ);
vaccKLMN = _mm_cvtps_epi32(vfpaccKLMN);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i voutGHIJKLMN = _mm_adds_epi16(_mm_packs_epi32(vaccGHIJ, vaccKLMN), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packus_epi16(vout01234567, vout89ABCDEF);
__m128i voutGHIJKLMNGHIJKLMN = _mm_packus_epi16(voutGHIJKLMN, voutGHIJKLMN);
vout0123456789ABCDEF = _mm_max_epu8(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMNGHIJKLMN = _mm_max_epu8(voutGHIJKLMNGHIJKLMN, voutput_min);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
_mm_storel_epi64((__m128i*) (output + 16), voutGHIJKLMNGHIJKLMN);
output += 24;
}
if XNN_UNLIKELY(channels != 0) {
do {
const __m128i vxi0x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i0));
i0 += 8;
const __m128i vxi1x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i1));
i1 += 8;
__m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
const __m128i vxi2x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i2));
i2 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
const __m128i vxi3x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i3));
i3 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
const __m128i vxi4x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i4));
i4 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
const __m128i vxi5x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i5));
i5 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
const __m128i vxi6x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i6));
i6 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
__m128i vacc0123 = _mm_cvtepu16_epi32(vacc01234567);
__m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, _mm_setzero_si128());
vacc0123 = _mm_add_epi32(vacc0123, _mm_load_si128((const __m128i*) buffer));
vacc4567 = _mm_add_epi32(vacc4567, _mm_load_si128((const __m128i*) (buffer + 4)));
buffer += 8;
__m128 vfpacc0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vacc4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
vfpacc0123 = _mm_min_ps(vfpacc0123, voutput_max_less_zero_point);
vfpacc4567 = _mm_min_ps(vfpacc4567, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vfpacc0123);
vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
if XNN_LIKELY(channels >= 8) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
channels -= 8;
} else {
if (channels & 4) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (channels & 2) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (channels & 1) {
*output = (uint8_t) _mm_extract_epi8(vout0123456701234567, 0);
output += 1;
}
channels = 0;
}
} while (channels != 0);
}
}
| 24,827
| 48.955734
| 106
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7p7x-minmax-fp32-sse41-c8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-sse4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QU8 "7p7x" multipass global average pooling microkernel with fp32
// requantization, SSE4.1, 8 channels per vector iteration.
//
// Pooling over `rows` (> 7) rows is split into three phases:
//   1. First pass: widen and sum rows 0-6 per channel, seed with init_bias,
//      and spill the int32 partial sums to `buffer`.
//   2. Middle passes: while more than 7 rows remain, add 7 more rows into
//      the partial sums held in `buffer`.
//   3. Final pass: add the remaining (1-7) rows, convert to float, scale,
//      clamp, requantize to uint8, and write `output`.
//
// Arguments:
//   rows         - number of input rows to average; must be > 7.
//   channels     - number of channels; must be non-zero.
//   input        - first input row; rows are `input_stride` bytes apart.
//   zero         - row substituted for exhausted row pointers in the final
//                  pass; presumably an all-zero row so surplus taps add
//                  nothing (inferred from the name - not visible here).
//   buffer       - int32 scratch for per-channel partial sums; sized for at
//                  least round_up_po2(channels, 8) elements (aligned loads
//                  and stores are used on it).
//   output       - uint8 results, one per channel.
//   params       - precomputed fp32_sse4 requantization parameters.
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__sse41_c8(
    size_t rows,
    size_t channels,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    int32_t* buffer,
    uint8_t* output,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows > 7);
  assert(channels != 0);
  // Pointers to the 7 rows consumed per pass.
  const uint8_t* i0 = input;
  const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
  const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
  const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
  const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
  const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
  const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
  // Per-pass pointer advance: skip 7 rows, minus the bytes the channel loop
  // already walked (channels rounded up to the 8-element tile).
  const size_t input_increment = 7 * input_stride - round_up_po2(channels, 8) * sizeof(uint8_t);
  const __m128i vinit_bias = _mm_load_si128((const __m128i*) params->fp32_sse4.init_bias);
  int32_t* b = buffer;
  size_t c = channels;
  // First pass: sum rows 0-6 and store init_bias-seeded int32 partials.
  for (; c != 0; c = doz(c, 8)) {
    // Widen 8 uint8 lanes to uint16 and accumulate; 7 rows of at most 255
    // each (7 * 255 = 1785) cannot overflow uint16.
    const __m128i vxi0x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i0));
    i0 += 8;
    const __m128i vxi1x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i1));
    i1 += 8;
    __m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
    const __m128i vxi2x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i2));
    i2 += 8;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
    const __m128i vxi3x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i3));
    i3 += 8;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
    const __m128i vxi4x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i4));
    i4 += 8;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
    const __m128i vxi5x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i5));
    i5 += 8;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
    const __m128i vxi6x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i6));
    i6 += 8;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
    // Split the 16-bit sums into two int32 quads (high half via unpack with
    // zero) and seed with the initialization bias.
    const __m128i vzero = _mm_setzero_si128();
    __m128i vacc0123 = _mm_cvtepu16_epi32(vacc01234567);
    __m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, vzero);
    vacc0123 = _mm_add_epi32(vacc0123, vinit_bias);
    vacc4567 = _mm_add_epi32(vacc4567, vinit_bias);
    _mm_store_si128((__m128i*) b, vacc0123);
    _mm_store_si128((__m128i*) (b + 4), vacc4567);
    b += 8;
  }
  // Middle passes: fold 7 more rows per pass into the partials in `buffer`
  // while more than 7 rows remain.
  for (rows -= 7; rows > 7; rows -= 7) {
    i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
    i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
    i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
    i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
    i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
    i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
    i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);
    int32_t* b = buffer;
    size_t c = channels;
    for (; c != 0; c = doz(c, 8)) {
      const __m128i vxi0x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i0));
      i0 += 8;
      const __m128i vxi1x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i1));
      i1 += 8;
      __m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
      const __m128i vxi2x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i2));
      i2 += 8;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
      const __m128i vxi3x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i3));
      i3 += 8;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
      const __m128i vxi4x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i4));
      i4 += 8;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
      const __m128i vxi5x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i5));
      i5 += 8;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
      const __m128i vxi6x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i6));
      i6 += 8;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
      // Widen and accumulate into the int32 partials already in `b`.
      const __m128i vzero = _mm_setzero_si128();
      __m128i vacc0123 = _mm_cvtepu16_epi32(vacc01234567);
      __m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, vzero);
      vacc0123 = _mm_add_epi32(vacc0123, _mm_load_si128((const __m128i*) b));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_load_si128((const __m128i*) (b + 4)));
      _mm_store_si128((__m128i*) b, vacc0123);
      _mm_store_si128((__m128i*) (b + 4), vacc4567);
      b += 8;
    }
  }
  // Final pass setup: advance the row pointers, then redirect pointers past
  // the remaining 1-7 valid rows to `zero` so the extra taps are harmless.
  i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
  i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
  if XNN_UNPREDICTABLE(rows < 2) {
    i1 = zero;
  }
  i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 2) {
    i2 = zero;
  }
  i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
  if XNN_UNPREDICTABLE(rows < 4) {
    i3 = zero;
  }
  i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 4) {
    i4 = zero;
  }
  i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
  if XNN_UNPREDICTABLE(rows < 6) {
    i5 = zero;
  }
  i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 6) {
    i6 = zero;
  }
  // fp32 requantization constants (precomputed by the caller): multiplicative
  // scale, upper clamp (in the zero-point-relative float domain), output zero
  // point, and lower clamp (applied in the uint8 domain).
  const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
  const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
  const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
  const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse4.output_min);
  // Final pass, full 8-channel tiles: finish the sums, requantize, store.
  for (; channels >= 8; channels -= 8) {
    const __m128i vxi0x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i0));
    i0 += 8;
    const __m128i vxi1x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i1));
    i1 += 8;
    __m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
    const __m128i vxi2x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i2));
    i2 += 8;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
    const __m128i vxi3x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i3));
    i3 += 8;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
    const __m128i vxi4x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i4));
    i4 += 8;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
    const __m128i vxi5x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i5));
    i5 += 8;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
    const __m128i vxi6x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i6));
    i6 += 8;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
    const __m128i vzero = _mm_setzero_si128();
    __m128i vacc0123 = _mm_cvtepu16_epi32(vacc01234567);
    __m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, vzero);
    vacc0123 = _mm_add_epi32(vacc0123, _mm_load_si128((const __m128i*) buffer));
    vacc4567 = _mm_add_epi32(vacc4567, _mm_load_si128((const __m128i*) (buffer + 4)));
    buffer += 8;
    // int32 -> float, multiply by the scale, clamp above; the lower clamp
    // happens after packing, in the uint8 domain (_mm_max_epu8 below).
    __m128 vfpacc0123 = _mm_cvtepi32_ps(vacc0123);
    __m128 vfpacc4567 = _mm_cvtepi32_ps(vacc4567);
    vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
    vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
    vfpacc0123 = _mm_min_ps(vfpacc0123, voutput_max_less_zero_point);
    vfpacc4567 = _mm_min_ps(vfpacc4567, voutput_max_less_zero_point);
    // Requantize: round float->int32, add the output zero point with a
    // saturating int16 pack/add, then saturating-pack to uint8.
    vacc0123 = _mm_cvtps_epi32(vfpacc0123);
    vacc4567 = _mm_cvtps_epi32(vfpacc4567);
    __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
    __m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
    vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
    _mm_storel_epi64((__m128i*) output, vout0123456701234567);
    output += 8;
  }
  // Final pass remainder: 1-7 trailing channels. A full 8-lane tile is still
  // computed (reads may overrun; the kernel is declared XNN_OOB_READS) and
  // the valid low lanes are stored 4/2/1 bytes at a time.
  if XNN_UNLIKELY(channels != 0) {
    {
      const __m128i vxi0x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i0));
      i0 += 8;
      const __m128i vxi1x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i1));
      i1 += 8;
      __m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
      const __m128i vxi2x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i2));
      i2 += 8;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
      const __m128i vxi3x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i3));
      i3 += 8;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
      const __m128i vxi4x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i4));
      i4 += 8;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
      const __m128i vxi5x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i5));
      i5 += 8;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
      const __m128i vxi6x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i6));
      i6 += 8;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
      __m128i vacc0123 = _mm_cvtepu16_epi32(vacc01234567);
      __m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, _mm_setzero_si128());
      vacc0123 = _mm_add_epi32(vacc0123, _mm_load_si128((const __m128i*) buffer));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_load_si128((const __m128i*) (buffer + 4)));
      buffer += 8;
      __m128 vfpacc0123 = _mm_cvtepi32_ps(vacc0123);
      __m128 vfpacc4567 = _mm_cvtepi32_ps(vacc4567);
      vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
      vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
      vfpacc0123 = _mm_min_ps(vfpacc0123, voutput_max_less_zero_point);
      vfpacc4567 = _mm_min_ps(vfpacc4567, voutput_max_less_zero_point);
      vacc0123 = _mm_cvtps_epi32(vfpacc0123);
      vacc4567 = _mm_cvtps_epi32(vfpacc4567);
      __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
      __m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
      vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
      // Store the low `channels` bytes: 4, then 2, then 1, shifting the
      // consumed lanes out of the register between stores.
      if (channels & 4) {
        unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
        vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
        output += 4;
      }
      if (channels & 2) {
        unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
        vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
        output += 2;
      }
      if (channels & 1) {
        *output = (uint8_t) _mm_extract_epi8(vout0123456701234567, 0);
      }
    }
  }
}
| 11,403
| 39.728571
| 106
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7p7x-minmax-fp32-wasmsimd-c16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__wasmsimd_c16(
size_t rows,
size_t channels,
const uint8_t* input,
size_t input_stride,
const uint8_t* zero,
int32_t* buffer,
uint8_t* output,
const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows > 7);
assert(channels != 0);
const uint8_t* i0 = input;
const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
const size_t input_increment = 7 * input_stride - round_up_po2(channels, 16) * sizeof(uint8_t);
const v128_t vinit_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.init_bias);
int32_t* b = buffer;
size_t c = channels;
for (; c != 0; c = doz(c, 16)) {
const v128_t vxi0x01234567 = wasm_u16x8_load8x8(i0);
const v128_t vxi0x89ABCDEF = wasm_u16x8_load8x8(i0 + 8);
i0 += 16;
const v128_t vxi1x01234567 = wasm_u16x8_load8x8(i1);
const v128_t vxi1x89ABCDEF = wasm_u16x8_load8x8(i1 + 8);
i1 += 16;
v128_t vacc01234567 = wasm_i16x8_add(vxi0x01234567, vxi1x01234567);
const v128_t vxi2x01234567 = wasm_u16x8_load8x8(i2);
v128_t vacc89ABCDEF = wasm_i16x8_add(vxi0x89ABCDEF, vxi1x89ABCDEF);
const v128_t vxi2x89ABCDEF = wasm_u16x8_load8x8(i2 + 8);
i2 += 16;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi2x01234567);
const v128_t vxi3x01234567 = wasm_u16x8_load8x8(i3);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi2x89ABCDEF);
const v128_t vxi3x89ABCDEF = wasm_u16x8_load8x8(i3 + 8);
i3 += 16;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi3x01234567);
const v128_t vxi4x01234567 = wasm_u16x8_load8x8(i4);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi3x89ABCDEF);
const v128_t vxi4x89ABCDEF = wasm_u16x8_load8x8(i4 + 8);
i4 += 16;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi4x01234567);
const v128_t vxi5x01234567 = wasm_u16x8_load8x8(i5);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi4x89ABCDEF);
const v128_t vxi5x89ABCDEF = wasm_u16x8_load8x8(i5 + 8);
i5 += 16;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi5x01234567);
const v128_t vxi6x01234567 = wasm_u16x8_load8x8(i6);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi5x89ABCDEF);
const v128_t vxi6x89ABCDEF = wasm_u16x8_load8x8(i6 + 8);
i6 += 16;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi6x01234567);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi6x89ABCDEF);
const v128_t vacc0123 = wasm_i32x4_add(vinit_bias, wasm_u32x4_extend_low_u16x8(vacc01234567));
const v128_t vacc4567 = wasm_i32x4_add(vinit_bias, wasm_u32x4_extend_high_u16x8(vacc01234567));
const v128_t vacc89AB = wasm_i32x4_add(vinit_bias, wasm_u32x4_extend_low_u16x8(vacc89ABCDEF));
const v128_t vaccCDEF = wasm_i32x4_add(vinit_bias, wasm_u32x4_extend_high_u16x8(vacc89ABCDEF));
wasm_v128_store(b, vacc0123);
wasm_v128_store(b + 4, vacc4567);
wasm_v128_store(b + 8, vacc89AB);
wasm_v128_store(b + 12, vaccCDEF);
b += 16;
}
for (rows -= 7; rows > 7; rows -= 7) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);
int32_t* b = buffer;
size_t c = channels;
for (; c != 0; c = doz(c, 16)) {
const v128_t vxi0x01234567 = wasm_u16x8_load8x8(i0);
const v128_t vxi0x89ABCDEF = wasm_u16x8_load8x8(i0 + 8);
i0 += 16;
const v128_t vxi1x01234567 = wasm_u16x8_load8x8(i1);
const v128_t vxi1x89ABCDEF = wasm_u16x8_load8x8(i1 + 8);
i1 += 16;
v128_t vacc01234567 = wasm_i16x8_add(vxi0x01234567, vxi1x01234567);
const v128_t vxi2x01234567 = wasm_u16x8_load8x8(i2);
v128_t vacc89ABCDEF = wasm_i16x8_add(vxi0x89ABCDEF, vxi1x89ABCDEF);
const v128_t vxi2x89ABCDEF = wasm_u16x8_load8x8(i2 + 8);
i2 += 16;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi2x01234567);
const v128_t vxi3x01234567 = wasm_u16x8_load8x8(i3);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi2x89ABCDEF);
const v128_t vxi3x89ABCDEF = wasm_u16x8_load8x8(i3 + 8);
i3 += 16;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi3x01234567);
const v128_t vxi4x01234567 = wasm_u16x8_load8x8(i4);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi3x89ABCDEF);
const v128_t vxi4x89ABCDEF = wasm_u16x8_load8x8(i4 + 8);
i4 += 16;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi4x01234567);
const v128_t vxi5x01234567 = wasm_u16x8_load8x8(i5);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi4x89ABCDEF);
const v128_t vxi5x89ABCDEF = wasm_u16x8_load8x8(i5 + 8);
i5 += 16;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi5x01234567);
const v128_t vxi6x01234567 = wasm_u16x8_load8x8(i6);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi5x89ABCDEF);
const v128_t vxi6x89ABCDEF = wasm_u16x8_load8x8(i6 + 8);
i6 += 16;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi6x01234567);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi6x89ABCDEF);
v128_t vacc0123 = wasm_v128_load(b);
v128_t vacc4567 = wasm_v128_load(b + 4);
v128_t vacc89AB = wasm_v128_load(b + 8);
v128_t vaccCDEF = wasm_v128_load(b + 12);
vacc0123 = wasm_i32x4_add(vacc0123, wasm_u32x4_extend_low_u16x8(vacc01234567));
vacc4567 = wasm_i32x4_add(vacc4567, wasm_u32x4_extend_high_u16x8(vacc01234567));
vacc89AB = wasm_i32x4_add(vacc89AB, wasm_u32x4_extend_low_u16x8(vacc89ABCDEF));
vaccCDEF = wasm_i32x4_add(vaccCDEF, wasm_u32x4_extend_high_u16x8(vacc89ABCDEF));
wasm_v128_store(b, vacc0123);
wasm_v128_store(b + 4, vacc4567);
wasm_v128_store(b + 8, vacc89AB);
wasm_v128_store(b + 12, vaccCDEF);
b += 16;
}
}
i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
for (; channels >= 16; channels -= 16) {
const v128_t vxi0x01234567 = wasm_u16x8_load8x8(i0);
const v128_t vxi0x89ABCDEF = wasm_u16x8_load8x8(i0 + 8);
i0 += 16;
const v128_t vxi1x01234567 = wasm_u16x8_load8x8(i1);
const v128_t vxi1x89ABCDEF = wasm_u16x8_load8x8(i1 + 8);
i1 += 16;
v128_t vacc01234567 = wasm_i16x8_add(vxi0x01234567, vxi1x01234567);
const v128_t vxi2x01234567 = wasm_u16x8_load8x8(i2);
v128_t vacc89ABCDEF = wasm_i16x8_add(vxi0x89ABCDEF, vxi1x89ABCDEF);
const v128_t vxi2x89ABCDEF = wasm_u16x8_load8x8(i2 + 8);
i2 += 16;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi2x01234567);
const v128_t vxi3x01234567 = wasm_u16x8_load8x8(i3);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi2x89ABCDEF);
const v128_t vxi3x89ABCDEF = wasm_u16x8_load8x8(i3 + 8);
i3 += 16;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi3x01234567);
const v128_t vxi4x01234567 = wasm_u16x8_load8x8(i4);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi3x89ABCDEF);
const v128_t vxi4x89ABCDEF = wasm_u16x8_load8x8(i4 + 8);
i4 += 16;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi4x01234567);
const v128_t vxi5x01234567 = wasm_u16x8_load8x8(i5);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi4x89ABCDEF);
const v128_t vxi5x89ABCDEF = wasm_u16x8_load8x8(i5 + 8);
i5 += 16;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi5x01234567);
const v128_t vxi6x01234567 = wasm_u16x8_load8x8(i6);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi5x89ABCDEF);
const v128_t vxi6x89ABCDEF = wasm_u16x8_load8x8(i6 + 8);
i6 += 16;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi6x01234567);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi6x89ABCDEF);
v128_t vacc0123 = wasm_v128_load(buffer);
v128_t vacc4567 = wasm_v128_load(buffer + 4);
v128_t vacc89AB = wasm_v128_load(buffer + 8);
v128_t vaccCDEF = wasm_v128_load(buffer + 12);
buffer += 16;
vacc0123 = wasm_i32x4_add(vacc0123, wasm_u32x4_extend_low_u16x8(vacc01234567));
vacc4567 = wasm_i32x4_add(vacc4567, wasm_u32x4_extend_high_u16x8(vacc01234567));
vacc89AB = wasm_i32x4_add(vacc89AB, wasm_u32x4_extend_low_u16x8(vacc89ABCDEF));
vaccCDEF = wasm_i32x4_add(vaccCDEF, wasm_u32x4_extend_high_u16x8(vacc89ABCDEF));
vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);
vacc89AB = wasm_f32x4_convert_i32x4(vacc89AB);
vaccCDEF = wasm_f32x4_convert_i32x4(vaccCDEF);
vacc0123 = wasm_f32x4_mul(vacc0123, vscale);
vacc4567 = wasm_f32x4_mul(vacc4567, vscale);
vacc89AB = wasm_f32x4_mul(vacc89AB, vscale);
vaccCDEF = wasm_f32x4_mul(vaccCDEF, vscale);
vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);
vacc89AB = wasm_f32x4_add(vacc89AB, vmagic_bias);
vaccCDEF = wasm_f32x4_add(vaccCDEF, vmagic_bias);
vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);
vacc89AB = wasm_i32x4_max(vacc89AB, vmagic_min);
vaccCDEF = wasm_i32x4_max(vaccCDEF, vmagic_min);
vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);
vacc89AB = wasm_i32x4_sub(vacc89AB, vmagic_bias_less_output_zero_point);
vaccCDEF = wasm_i32x4_sub(vaccCDEF, vmagic_bias_less_output_zero_point);
v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
v128_t vout89ABCDEF = wasm_i16x8_narrow_i32x4(vacc89AB, vaccCDEF);
v128_t vout0123456789ABCDEF = wasm_u8x16_narrow_i16x8(vout01234567, vout89ABCDEF);
vout0123456789ABCDEF = wasm_u8x16_min(vout0123456789ABCDEF, voutput_max);
wasm_v128_store(output, vout0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(channels != 0) {
do {
const v128_t vxi0x01234567 = wasm_u16x8_load8x8(i0);
i0 += 8;
const v128_t vxi1x01234567 = wasm_u16x8_load8x8(i1);
i1 += 8;
v128_t vacc01234567 = wasm_i16x8_add(vxi0x01234567, vxi1x01234567);
const v128_t vxi2x01234567 = wasm_u16x8_load8x8(i2);
i2 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi2x01234567);
const v128_t vxi3x01234567 = wasm_u16x8_load8x8(i3);
i3 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi3x01234567);
const v128_t vxi4x01234567 = wasm_u16x8_load8x8(i4);
i4 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi4x01234567);
const v128_t vxi5x01234567 = wasm_u16x8_load8x8(i5);
i5 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi5x01234567);
const v128_t vxi6x01234567 = wasm_u16x8_load8x8(i6);
i6 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi6x01234567);
v128_t vacc0123 = wasm_v128_load(buffer);
v128_t vacc4567 = wasm_v128_load(buffer + 4);
buffer += 8;
vacc0123 = wasm_i32x4_add(vacc0123, wasm_u32x4_extend_low_u16x8(vacc01234567));
vacc4567 = wasm_i32x4_add(vacc4567, wasm_u32x4_extend_high_u16x8(vacc01234567));
vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);
vacc0123 = wasm_f32x4_mul(vacc0123, vscale);
vacc4567 = wasm_f32x4_mul(vacc4567, vscale);
vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);
vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);
vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);
const v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
v128_t vout0123456701234567 = wasm_u8x16_narrow_i16x8(vout01234567, vout01234567);
vout0123456701234567 = wasm_u8x16_min(vout0123456701234567, voutput_max);
if XNN_LIKELY(channels >= 8) {
wasm_v128_store64_lane(output, vout0123456701234567, 0);
output += 8;
channels -= 8;
} else {
if (channels & 4) {
wasm_v128_store32_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u64x2_shr(vout0123456701234567, 32);
output += 4;
}
if (channels & 2) {
wasm_v128_store16_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u32x4_shr(vout0123456701234567, 16);
output += 2;
}
if (channels & 1) {
wasm_v128_store8_lane(output, vout0123456701234567, 0);
output += 1;
}
channels = 0;
}
} while (channels != 0);
}
}
| 14,736
| 40.985755
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7p7x-minmax-fp32-wasmsimd-c24.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
// Multipass ("7p7x") global average pooling microkernel for QU8 inputs with
// FP32-based requantization, WAsm SIMD implementation, 24 channels per
// vector iteration.
//
// rows         - number of input rows to pool; must exceed 7 (the asserts
//                below), smaller row counts go to the single-pass variant
// channels     - number of channels per row (non-zero)
// input        - pointer to the first input row
// input_stride - byte stride between consecutive input rows
// zero         - pointer to a zero-filled row, substituted for row pointers
//                that would read past the last valid row in the final pass
// buffer       - int32 scratch accumulator (one element per channel, rounded
//                up), carried between passes
// output       - destination for the quantized uint8 averages
// params       - requantization constants (init_bias, scale, magic-bias
//                rounding constants, output zero point / max clamp)
//
// XNN_OOB_READS: the 8-channel remainder loops may read up to a full 8-byte
// group past the requested channels; callers must tolerate the overread.
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__wasmsimd_c24(
    size_t rows,
    size_t channels,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    int32_t* buffer,
    uint8_t* output,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows > 7);
  assert(channels != 0);

  // Seven row pointers: every pass accumulates exactly 7 input rows.
  const uint8_t* i0 = input;
  const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
  const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
  const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
  const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
  const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
  const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
  // Advance from the end of one 7-row group to the start of the next: skip
  // 7 row strides minus the bytes already consumed by the channel loops
  // (which walk in multiples of 8, hence round_up_po2(channels, 8)).
  const size_t input_increment = 7 * input_stride - round_up_po2(channels, 8) * sizeof(uint8_t);

  // First pass: sum rows 0..6 into the int32 buffer, folding in init_bias.
  const v128_t vinit_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.init_bias);
  int32_t* b = buffer;
  size_t c = channels;
  for (; c >= 24; c -= 24) {
    // Widening loads (u8 -> u16); seven 8-bit values summed in 16-bit lanes
    // cannot overflow (7 * 255 < 65536). Loads of the next row are
    // interleaved with the adds of the previous one.
    const v128_t vxi0x01234567 = wasm_u16x8_load8x8(i0);
    const v128_t vxi0x89ABCDEF = wasm_u16x8_load8x8(i0 + 8);
    const v128_t vxi0xGHIJKLMN = wasm_u16x8_load8x8(i0 + 16);
    i0 += 24;
    const v128_t vxi1x01234567 = wasm_u16x8_load8x8(i1);
    const v128_t vxi1x89ABCDEF = wasm_u16x8_load8x8(i1 + 8);
    const v128_t vxi1xGHIJKLMN = wasm_u16x8_load8x8(i1 + 16);
    i1 += 24;

    v128_t vacc01234567 = wasm_i16x8_add(vxi0x01234567, vxi1x01234567);
    const v128_t vxi2x01234567 = wasm_u16x8_load8x8(i2);
    v128_t vacc89ABCDEF = wasm_i16x8_add(vxi0x89ABCDEF, vxi1x89ABCDEF);
    const v128_t vxi2x89ABCDEF = wasm_u16x8_load8x8(i2 + 8);
    v128_t vaccGHIJKLMN = wasm_i16x8_add(vxi0xGHIJKLMN, vxi1xGHIJKLMN);
    const v128_t vxi2xGHIJKLMN = wasm_u16x8_load8x8(i2 + 16);
    i2 += 24;
    vacc01234567 = wasm_i16x8_add(vacc01234567, vxi2x01234567);
    const v128_t vxi3x01234567 = wasm_u16x8_load8x8(i3);
    vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi2x89ABCDEF);
    const v128_t vxi3x89ABCDEF = wasm_u16x8_load8x8(i3 + 8);
    vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi2xGHIJKLMN);
    const v128_t vxi3xGHIJKLMN = wasm_u16x8_load8x8(i3 + 16);
    i3 += 24;
    vacc01234567 = wasm_i16x8_add(vacc01234567, vxi3x01234567);
    const v128_t vxi4x01234567 = wasm_u16x8_load8x8(i4);
    vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi3x89ABCDEF);
    const v128_t vxi4x89ABCDEF = wasm_u16x8_load8x8(i4 + 8);
    vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi3xGHIJKLMN);
    const v128_t vxi4xGHIJKLMN = wasm_u16x8_load8x8(i4 + 16);
    i4 += 24;
    vacc01234567 = wasm_i16x8_add(vacc01234567, vxi4x01234567);
    const v128_t vxi5x01234567 = wasm_u16x8_load8x8(i5);
    vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi4x89ABCDEF);
    const v128_t vxi5x89ABCDEF = wasm_u16x8_load8x8(i5 + 8);
    vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi4xGHIJKLMN);
    const v128_t vxi5xGHIJKLMN = wasm_u16x8_load8x8(i5 + 16);
    i5 += 24;
    vacc01234567 = wasm_i16x8_add(vacc01234567, vxi5x01234567);
    const v128_t vxi6x01234567 = wasm_u16x8_load8x8(i6);
    vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi5x89ABCDEF);
    const v128_t vxi6x89ABCDEF = wasm_u16x8_load8x8(i6 + 8);
    vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi5xGHIJKLMN);
    const v128_t vxi6xGHIJKLMN = wasm_u16x8_load8x8(i6 + 16);
    i6 += 24;
    vacc01234567 = wasm_i16x8_add(vacc01234567, vxi6x01234567);
    vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi6x89ABCDEF);
    vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi6xGHIJKLMN);

    // Widen the 16-bit row sums to 32 bits, add init_bias, and store to the
    // scratch buffer for later passes.
    const v128_t vacc0123 = wasm_i32x4_add(vinit_bias, wasm_u32x4_extend_low_u16x8(vacc01234567));
    const v128_t vacc4567 = wasm_i32x4_add(vinit_bias, wasm_u32x4_extend_high_u16x8(vacc01234567));
    const v128_t vacc89AB = wasm_i32x4_add(vinit_bias, wasm_u32x4_extend_low_u16x8(vacc89ABCDEF));
    const v128_t vaccCDEF = wasm_i32x4_add(vinit_bias, wasm_u32x4_extend_high_u16x8(vacc89ABCDEF));
    const v128_t vaccGHIJ = wasm_i32x4_add(vinit_bias, wasm_u32x4_extend_low_u16x8(vaccGHIJKLMN));
    const v128_t vaccKLMN = wasm_i32x4_add(vinit_bias, wasm_u32x4_extend_high_u16x8(vaccGHIJKLMN));

    wasm_v128_store(b, vacc0123);
    wasm_v128_store(b + 4, vacc4567);
    wasm_v128_store(b + 8, vacc89AB);
    wasm_v128_store(b + 12, vaccCDEF);
    wasm_v128_store(b + 16, vaccGHIJ);
    wasm_v128_store(b + 20, vaccKLMN);
    b += 24;
  }
  // First-pass remainder: handle leftover channels 8 at a time (the last
  // group may overread; doz() saturates the counter at zero).
  if XNN_UNLIKELY(c != 0) {
    do {
      const v128_t vxi0x01234567 = wasm_u16x8_load8x8(i0);
      i0 += 8;
      const v128_t vxi1x01234567 = wasm_u16x8_load8x8(i1);
      i1 += 8;

      v128_t vacc01234567 = wasm_i16x8_add(vxi0x01234567, vxi1x01234567);
      const v128_t vxi2x01234567 = wasm_u16x8_load8x8(i2);
      i2 += 8;
      vacc01234567 = wasm_i16x8_add(vacc01234567, vxi2x01234567);
      const v128_t vxi3x01234567 = wasm_u16x8_load8x8(i3);
      i3 += 8;
      vacc01234567 = wasm_i16x8_add(vacc01234567, vxi3x01234567);
      const v128_t vxi4x01234567 = wasm_u16x8_load8x8(i4);
      i4 += 8;
      vacc01234567 = wasm_i16x8_add(vacc01234567, vxi4x01234567);
      const v128_t vxi5x01234567 = wasm_u16x8_load8x8(i5);
      i5 += 8;
      vacc01234567 = wasm_i16x8_add(vacc01234567, vxi5x01234567);
      const v128_t vxi6x01234567 = wasm_u16x8_load8x8(i6);
      i6 += 8;
      vacc01234567 = wasm_i16x8_add(vacc01234567, vxi6x01234567);

      const v128_t vacc0123 = wasm_i32x4_add(vinit_bias, wasm_u32x4_extend_low_u16x8(vacc01234567));
      const v128_t vacc4567 = wasm_i32x4_add(vinit_bias, wasm_u32x4_extend_high_u16x8(vacc01234567));

      wasm_v128_store(b, vacc0123);
      wasm_v128_store(b + 4, vacc4567);
      b += 8;

      c = doz(c, 8);
    } while (c != 0);
  }

  // Middle passes: while more than 7 rows remain, add the next 7 rows into
  // the existing int32 accumulators in the buffer (no bias this time).
  for (rows -= 7; rows > 7; rows -= 7) {
    i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
    i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
    i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
    i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
    i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
    i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
    i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);

    int32_t* b = buffer;
    size_t c = channels;
    for (; c >= 24; c -= 24) {
      const v128_t vxi0x01234567 = wasm_u16x8_load8x8(i0);
      const v128_t vxi0x89ABCDEF = wasm_u16x8_load8x8(i0 + 8);
      const v128_t vxi0xGHIJKLMN = wasm_u16x8_load8x8(i0 + 16);
      i0 += 24;
      const v128_t vxi1x01234567 = wasm_u16x8_load8x8(i1);
      const v128_t vxi1x89ABCDEF = wasm_u16x8_load8x8(i1 + 8);
      const v128_t vxi1xGHIJKLMN = wasm_u16x8_load8x8(i1 + 16);
      i1 += 24;

      v128_t vacc01234567 = wasm_i16x8_add(vxi0x01234567, vxi1x01234567);
      const v128_t vxi2x01234567 = wasm_u16x8_load8x8(i2);
      v128_t vacc89ABCDEF = wasm_i16x8_add(vxi0x89ABCDEF, vxi1x89ABCDEF);
      const v128_t vxi2x89ABCDEF = wasm_u16x8_load8x8(i2 + 8);
      v128_t vaccGHIJKLMN = wasm_i16x8_add(vxi0xGHIJKLMN, vxi1xGHIJKLMN);
      const v128_t vxi2xGHIJKLMN = wasm_u16x8_load8x8(i2 + 16);
      i2 += 24;
      vacc01234567 = wasm_i16x8_add(vacc01234567, vxi2x01234567);
      const v128_t vxi3x01234567 = wasm_u16x8_load8x8(i3);
      vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi2x89ABCDEF);
      const v128_t vxi3x89ABCDEF = wasm_u16x8_load8x8(i3 + 8);
      vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi2xGHIJKLMN);
      const v128_t vxi3xGHIJKLMN = wasm_u16x8_load8x8(i3 + 16);
      i3 += 24;
      vacc01234567 = wasm_i16x8_add(vacc01234567, vxi3x01234567);
      const v128_t vxi4x01234567 = wasm_u16x8_load8x8(i4);
      vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi3x89ABCDEF);
      const v128_t vxi4x89ABCDEF = wasm_u16x8_load8x8(i4 + 8);
      vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi3xGHIJKLMN);
      const v128_t vxi4xGHIJKLMN = wasm_u16x8_load8x8(i4 + 16);
      i4 += 24;
      vacc01234567 = wasm_i16x8_add(vacc01234567, vxi4x01234567);
      const v128_t vxi5x01234567 = wasm_u16x8_load8x8(i5);
      vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi4x89ABCDEF);
      const v128_t vxi5x89ABCDEF = wasm_u16x8_load8x8(i5 + 8);
      vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi4xGHIJKLMN);
      const v128_t vxi5xGHIJKLMN = wasm_u16x8_load8x8(i5 + 16);
      i5 += 24;
      vacc01234567 = wasm_i16x8_add(vacc01234567, vxi5x01234567);
      const v128_t vxi6x01234567 = wasm_u16x8_load8x8(i6);
      vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi5x89ABCDEF);
      const v128_t vxi6x89ABCDEF = wasm_u16x8_load8x8(i6 + 8);
      vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi5xGHIJKLMN);
      const v128_t vxi6xGHIJKLMN = wasm_u16x8_load8x8(i6 + 16);
      i6 += 24;
      vacc01234567 = wasm_i16x8_add(vacc01234567, vxi6x01234567);
      vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi6x89ABCDEF);
      vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi6xGHIJKLMN);

      // Accumulate the 7-row sums into the int32 totals already in the buffer.
      v128_t vacc0123 = wasm_v128_load(b);
      v128_t vacc4567 = wasm_v128_load(b + 4);
      v128_t vacc89AB = wasm_v128_load(b + 8);
      v128_t vaccCDEF = wasm_v128_load(b + 12);
      v128_t vaccGHIJ = wasm_v128_load(b + 16);
      v128_t vaccKLMN = wasm_v128_load(b + 20);

      vacc0123 = wasm_i32x4_add(vacc0123, wasm_u32x4_extend_low_u16x8(vacc01234567));
      vacc4567 = wasm_i32x4_add(vacc4567, wasm_u32x4_extend_high_u16x8(vacc01234567));
      vacc89AB = wasm_i32x4_add(vacc89AB, wasm_u32x4_extend_low_u16x8(vacc89ABCDEF));
      vaccCDEF = wasm_i32x4_add(vaccCDEF, wasm_u32x4_extend_high_u16x8(vacc89ABCDEF));
      vaccGHIJ = wasm_i32x4_add(vaccGHIJ, wasm_u32x4_extend_low_u16x8(vaccGHIJKLMN));
      vaccKLMN = wasm_i32x4_add(vaccKLMN, wasm_u32x4_extend_high_u16x8(vaccGHIJKLMN));

      wasm_v128_store(b, vacc0123);
      wasm_v128_store(b + 4, vacc4567);
      wasm_v128_store(b + 8, vacc89AB);
      wasm_v128_store(b + 12, vaccCDEF);
      wasm_v128_store(b + 16, vaccGHIJ);
      wasm_v128_store(b + 20, vaccKLMN);
      b += 24;
    }
    // Middle-pass remainder: same accumulate-into-buffer, 8 channels at a time.
    if XNN_UNLIKELY(c != 0) {
      do {
        const v128_t vxi0x01234567 = wasm_u16x8_load8x8(i0);
        i0 += 8;
        const v128_t vxi1x01234567 = wasm_u16x8_load8x8(i1);
        i1 += 8;

        v128_t vacc01234567 = wasm_i16x8_add(vxi0x01234567, vxi1x01234567);
        const v128_t vxi2x01234567 = wasm_u16x8_load8x8(i2);
        i2 += 8;
        vacc01234567 = wasm_i16x8_add(vacc01234567, vxi2x01234567);
        const v128_t vxi3x01234567 = wasm_u16x8_load8x8(i3);
        i3 += 8;
        vacc01234567 = wasm_i16x8_add(vacc01234567, vxi3x01234567);
        const v128_t vxi4x01234567 = wasm_u16x8_load8x8(i4);
        i4 += 8;
        vacc01234567 = wasm_i16x8_add(vacc01234567, vxi4x01234567);
        const v128_t vxi5x01234567 = wasm_u16x8_load8x8(i5);
        i5 += 8;
        vacc01234567 = wasm_i16x8_add(vacc01234567, vxi5x01234567);
        const v128_t vxi6x01234567 = wasm_u16x8_load8x8(i6);
        i6 += 8;
        vacc01234567 = wasm_i16x8_add(vacc01234567, vxi6x01234567);

        v128_t vacc0123 = wasm_v128_load(b);
        v128_t vacc4567 = wasm_v128_load(b + 4);

        vacc0123 = wasm_i32x4_add(vacc0123, wasm_u32x4_extend_low_u16x8(vacc01234567));
        vacc4567 = wasm_i32x4_add(vacc4567, wasm_u32x4_extend_high_u16x8(vacc01234567));

        wasm_v128_store(b, vacc0123);
        wasm_v128_store(b + 4, vacc4567);
        b += 8;

        c = doz(c, 8);
      } while (c != 0);
    }
  }

  // Final pass: 1..7 rows remain (rows was decremented by 7 per pass above).
  // Row pointers beyond the remaining count are redirected to the zero row
  // so they contribute nothing to the sum.
  i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
  i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
  if XNN_UNPREDICTABLE(rows < 2) {
    i1 = zero;
  }
  i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 2) {
    i2 = zero;
  }
  i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
  if XNN_UNPREDICTABLE(rows < 4) {
    i3 = zero;
  }
  i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 4) {
    i4 = zero;
  }
  i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
  if XNN_UNPREDICTABLE(rows < 6) {
    i5 = zero;
  }
  i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 6) {
    i6 = zero;
  }

  // FP32 requantization constants: scale, magic-bias rounding constants, and
  // the output clamp. Each is a 64-bit pattern splatted across the vector.
  const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
  const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
  const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
  const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
  const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
  for (; channels >= 24; channels -= 24) {
    const v128_t vxi0x01234567 = wasm_u16x8_load8x8(i0);
    const v128_t vxi0x89ABCDEF = wasm_u16x8_load8x8(i0 + 8);
    const v128_t vxi0xGHIJKLMN = wasm_u16x8_load8x8(i0 + 16);
    i0 += 24;
    const v128_t vxi1x01234567 = wasm_u16x8_load8x8(i1);
    const v128_t vxi1x89ABCDEF = wasm_u16x8_load8x8(i1 + 8);
    const v128_t vxi1xGHIJKLMN = wasm_u16x8_load8x8(i1 + 16);
    i1 += 24;

    v128_t vacc01234567 = wasm_i16x8_add(vxi0x01234567, vxi1x01234567);
    const v128_t vxi2x01234567 = wasm_u16x8_load8x8(i2);
    v128_t vacc89ABCDEF = wasm_i16x8_add(vxi0x89ABCDEF, vxi1x89ABCDEF);
    const v128_t vxi2x89ABCDEF = wasm_u16x8_load8x8(i2 + 8);
    v128_t vaccGHIJKLMN = wasm_i16x8_add(vxi0xGHIJKLMN, vxi1xGHIJKLMN);
    const v128_t vxi2xGHIJKLMN = wasm_u16x8_load8x8(i2 + 16);
    i2 += 24;
    vacc01234567 = wasm_i16x8_add(vacc01234567, vxi2x01234567);
    const v128_t vxi3x01234567 = wasm_u16x8_load8x8(i3);
    vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi2x89ABCDEF);
    const v128_t vxi3x89ABCDEF = wasm_u16x8_load8x8(i3 + 8);
    vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi2xGHIJKLMN);
    const v128_t vxi3xGHIJKLMN = wasm_u16x8_load8x8(i3 + 16);
    i3 += 24;
    vacc01234567 = wasm_i16x8_add(vacc01234567, vxi3x01234567);
    const v128_t vxi4x01234567 = wasm_u16x8_load8x8(i4);
    vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi3x89ABCDEF);
    const v128_t vxi4x89ABCDEF = wasm_u16x8_load8x8(i4 + 8);
    vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi3xGHIJKLMN);
    const v128_t vxi4xGHIJKLMN = wasm_u16x8_load8x8(i4 + 16);
    i4 += 24;
    vacc01234567 = wasm_i16x8_add(vacc01234567, vxi4x01234567);
    const v128_t vxi5x01234567 = wasm_u16x8_load8x8(i5);
    vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi4x89ABCDEF);
    const v128_t vxi5x89ABCDEF = wasm_u16x8_load8x8(i5 + 8);
    vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi4xGHIJKLMN);
    const v128_t vxi5xGHIJKLMN = wasm_u16x8_load8x8(i5 + 16);
    i5 += 24;
    vacc01234567 = wasm_i16x8_add(vacc01234567, vxi5x01234567);
    const v128_t vxi6x01234567 = wasm_u16x8_load8x8(i6);
    vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi5x89ABCDEF);
    const v128_t vxi6x89ABCDEF = wasm_u16x8_load8x8(i6 + 8);
    vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi5xGHIJKLMN);
    const v128_t vxi6xGHIJKLMN = wasm_u16x8_load8x8(i6 + 16);
    i6 += 24;
    vacc01234567 = wasm_i16x8_add(vacc01234567, vxi6x01234567);
    vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi6x89ABCDEF);
    vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi6xGHIJKLMN);

    // Fold the final 7-row sums into the buffered totals.
    v128_t vacc0123 = wasm_v128_load(buffer);
    v128_t vacc4567 = wasm_v128_load(buffer + 4);
    v128_t vacc89AB = wasm_v128_load(buffer + 8);
    v128_t vaccCDEF = wasm_v128_load(buffer + 12);
    v128_t vaccGHIJ = wasm_v128_load(buffer + 16);
    v128_t vaccKLMN = wasm_v128_load(buffer + 20);
    buffer += 24;

    vacc0123 = wasm_i32x4_add(vacc0123, wasm_u32x4_extend_low_u16x8(vacc01234567));
    vacc4567 = wasm_i32x4_add(vacc4567, wasm_u32x4_extend_high_u16x8(vacc01234567));
    vacc89AB = wasm_i32x4_add(vacc89AB, wasm_u32x4_extend_low_u16x8(vacc89ABCDEF));
    vaccCDEF = wasm_i32x4_add(vaccCDEF, wasm_u32x4_extend_high_u16x8(vacc89ABCDEF));
    vaccGHIJ = wasm_i32x4_add(vaccGHIJ, wasm_u32x4_extend_low_u16x8(vaccGHIJKLMN));
    vaccKLMN = wasm_i32x4_add(vaccKLMN, wasm_u32x4_extend_high_u16x8(vaccGHIJKLMN));

    // Requantize: int32 total -> float, multiply by scale, then the
    // magic-bias sequence (add bias, integer max for the lower clamp,
    // subtract bias-less-zero-point) converts back to a biased int32 while
    // rounding and applying the output zero point in one stream of ops.
    vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
    vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);
    vacc89AB = wasm_f32x4_convert_i32x4(vacc89AB);
    vaccCDEF = wasm_f32x4_convert_i32x4(vaccCDEF);
    vaccGHIJ = wasm_f32x4_convert_i32x4(vaccGHIJ);
    vaccKLMN = wasm_f32x4_convert_i32x4(vaccKLMN);

    vacc0123 = wasm_f32x4_mul(vacc0123, vscale);
    vacc4567 = wasm_f32x4_mul(vacc4567, vscale);
    vacc89AB = wasm_f32x4_mul(vacc89AB, vscale);
    vaccCDEF = wasm_f32x4_mul(vaccCDEF, vscale);
    vaccGHIJ = wasm_f32x4_mul(vaccGHIJ, vscale);
    vaccKLMN = wasm_f32x4_mul(vaccKLMN, vscale);

    vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
    vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);
    vacc89AB = wasm_f32x4_add(vacc89AB, vmagic_bias);
    vaccCDEF = wasm_f32x4_add(vaccCDEF, vmagic_bias);
    vaccGHIJ = wasm_f32x4_add(vaccGHIJ, vmagic_bias);
    vaccKLMN = wasm_f32x4_add(vaccKLMN, vmagic_bias);

    vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
    vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);
    vacc89AB = wasm_i32x4_max(vacc89AB, vmagic_min);
    vaccCDEF = wasm_i32x4_max(vaccCDEF, vmagic_min);
    vaccGHIJ = wasm_i32x4_max(vaccGHIJ, vmagic_min);
    vaccKLMN = wasm_i32x4_max(vaccKLMN, vmagic_min);

    vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
    vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);
    vacc89AB = wasm_i32x4_sub(vacc89AB, vmagic_bias_less_output_zero_point);
    vaccCDEF = wasm_i32x4_sub(vaccCDEF, vmagic_bias_less_output_zero_point);
    vaccGHIJ = wasm_i32x4_sub(vaccGHIJ, vmagic_bias_less_output_zero_point);
    vaccKLMN = wasm_i32x4_sub(vaccKLMN, vmagic_bias_less_output_zero_point);

    // Narrow int32 -> int16 -> uint8 and apply the upper clamp.
    v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
    v128_t vout89ABCDEF = wasm_i16x8_narrow_i32x4(vacc89AB, vaccCDEF);
    v128_t voutGHIJKLMN = wasm_i16x8_narrow_i32x4(vaccGHIJ, vaccKLMN);

    v128_t vout0123456789ABCDEF = wasm_u8x16_narrow_i16x8(vout01234567, vout89ABCDEF);
    v128_t voutGHIJKLMNGHIJKLMN = wasm_u8x16_narrow_i16x8(voutGHIJKLMN, voutGHIJKLMN);

    vout0123456789ABCDEF = wasm_u8x16_min(vout0123456789ABCDEF, voutput_max);
    voutGHIJKLMNGHIJKLMN = wasm_u8x16_min(voutGHIJKLMNGHIJKLMN, voutput_max);

    // 16 bytes + low 8 bytes of the duplicated vector = 24 output channels.
    wasm_v128_store(output, vout0123456789ABCDEF);
    wasm_v128_store64_lane(output + 16, voutGHIJKLMNGHIJKLMN, 0);
    output += 24;
  }
  // Final-pass remainder: 8 channels per iteration; the last partial group
  // is written 4/2/1 bytes at a time via lane stores.
  if XNN_UNLIKELY(channels != 0) {
    do {
      const v128_t vxi0x01234567 = wasm_u16x8_load8x8(i0);
      i0 += 8;
      const v128_t vxi1x01234567 = wasm_u16x8_load8x8(i1);
      i1 += 8;

      v128_t vacc01234567 = wasm_i16x8_add(vxi0x01234567, vxi1x01234567);
      const v128_t vxi2x01234567 = wasm_u16x8_load8x8(i2);
      i2 += 8;
      vacc01234567 = wasm_i16x8_add(vacc01234567, vxi2x01234567);
      const v128_t vxi3x01234567 = wasm_u16x8_load8x8(i3);
      i3 += 8;
      vacc01234567 = wasm_i16x8_add(vacc01234567, vxi3x01234567);
      const v128_t vxi4x01234567 = wasm_u16x8_load8x8(i4);
      i4 += 8;
      vacc01234567 = wasm_i16x8_add(vacc01234567, vxi4x01234567);
      const v128_t vxi5x01234567 = wasm_u16x8_load8x8(i5);
      i5 += 8;
      vacc01234567 = wasm_i16x8_add(vacc01234567, vxi5x01234567);
      const v128_t vxi6x01234567 = wasm_u16x8_load8x8(i6);
      i6 += 8;
      vacc01234567 = wasm_i16x8_add(vacc01234567, vxi6x01234567);

      v128_t vacc0123 = wasm_v128_load(buffer);
      v128_t vacc4567 = wasm_v128_load(buffer + 4);
      buffer += 8;

      vacc0123 = wasm_i32x4_add(vacc0123, wasm_u32x4_extend_low_u16x8(vacc01234567));
      vacc4567 = wasm_i32x4_add(vacc4567, wasm_u32x4_extend_high_u16x8(vacc01234567));

      vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
      vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);

      vacc0123 = wasm_f32x4_mul(vacc0123, vscale);
      vacc4567 = wasm_f32x4_mul(vacc4567, vscale);

      vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
      vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);

      vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
      vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);

      vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
      vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);

      const v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
      v128_t vout0123456701234567 = wasm_u8x16_narrow_i16x8(vout01234567, vout01234567);
      vout0123456701234567 = wasm_u8x16_min(vout0123456701234567, voutput_max);

      if XNN_LIKELY(channels >= 8) {
        wasm_v128_store64_lane(output, vout0123456701234567, 0);
        output += 8;
        channels -= 8;
      } else {
        // Partial tail: emit 4, then 2, then 1 byte(s), shifting consumed
        // lanes out of the low end of the vector after each store.
        if (channels & 4) {
          wasm_v128_store32_lane(output, vout0123456701234567, 0);
          vout0123456701234567 = wasm_u64x2_shr(vout0123456701234567, 32);
          output += 4;
        }
        if (channels & 2) {
          wasm_v128_store16_lane(output, vout0123456701234567, 0);
          vout0123456701234567 = wasm_u32x4_shr(vout0123456701234567, 16);
          output += 2;
        }
        if (channels & 1) {
          wasm_v128_store8_lane(output, vout0123456701234567, 0);
          output += 1;
        }
        channels = 0;
      }
    } while (channels != 0);
  }
}
| 21,731
| 43.081136
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7p7x-minmax-fp32-wasmsimd-c8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__wasmsimd_c8(
size_t rows,
size_t channels,
const uint8_t* input,
size_t input_stride,
const uint8_t* zero,
int32_t* buffer,
uint8_t* output,
const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows > 7);
assert(channels != 0);
const uint8_t* i0 = input;
const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
const size_t input_increment = 7 * input_stride - round_up_po2(channels, 8) * sizeof(uint8_t);
const v128_t vinit_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.init_bias);
int32_t* b = buffer;
size_t c = channels;
for (; c != 0; c = doz(c, 8)) {
const v128_t vxi0x01234567 = wasm_u16x8_load8x8(i0);
i0 += 8;
const v128_t vxi1x01234567 = wasm_u16x8_load8x8(i1);
i1 += 8;
v128_t vacc01234567 = wasm_i16x8_add(vxi0x01234567, vxi1x01234567);
const v128_t vxi2x01234567 = wasm_u16x8_load8x8(i2);
i2 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi2x01234567);
const v128_t vxi3x01234567 = wasm_u16x8_load8x8(i3);
i3 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi3x01234567);
const v128_t vxi4x01234567 = wasm_u16x8_load8x8(i4);
i4 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi4x01234567);
const v128_t vxi5x01234567 = wasm_u16x8_load8x8(i5);
i5 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi5x01234567);
const v128_t vxi6x01234567 = wasm_u16x8_load8x8(i6);
i6 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi6x01234567);
const v128_t vacc0123 = wasm_i32x4_add(vinit_bias, wasm_u32x4_extend_low_u16x8(vacc01234567));
const v128_t vacc4567 = wasm_i32x4_add(vinit_bias, wasm_u32x4_extend_high_u16x8(vacc01234567));
wasm_v128_store(b, vacc0123);
wasm_v128_store(b + 4, vacc4567);
b += 8;
}
for (rows -= 7; rows > 7; rows -= 7) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);
int32_t* b = buffer;
size_t c = channels;
for (; c != 0; c = doz(c, 8)) {
const v128_t vxi0x01234567 = wasm_u16x8_load8x8(i0);
i0 += 8;
const v128_t vxi1x01234567 = wasm_u16x8_load8x8(i1);
i1 += 8;
v128_t vacc01234567 = wasm_i16x8_add(vxi0x01234567, vxi1x01234567);
const v128_t vxi2x01234567 = wasm_u16x8_load8x8(i2);
i2 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi2x01234567);
const v128_t vxi3x01234567 = wasm_u16x8_load8x8(i3);
i3 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi3x01234567);
const v128_t vxi4x01234567 = wasm_u16x8_load8x8(i4);
i4 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi4x01234567);
const v128_t vxi5x01234567 = wasm_u16x8_load8x8(i5);
i5 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi5x01234567);
const v128_t vxi6x01234567 = wasm_u16x8_load8x8(i6);
i6 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi6x01234567);
v128_t vacc0123 = wasm_v128_load(b);
v128_t vacc4567 = wasm_v128_load(b + 4);
vacc0123 = wasm_i32x4_add(vacc0123, wasm_u32x4_extend_low_u16x8(vacc01234567));
vacc4567 = wasm_i32x4_add(vacc4567, wasm_u32x4_extend_high_u16x8(vacc01234567));
wasm_v128_store(b, vacc0123);
wasm_v128_store(b + 4, vacc4567);
b += 8;
}
}
i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
for (; channels >= 8; channels -= 8) {
const v128_t vxi0x01234567 = wasm_u16x8_load8x8(i0);
i0 += 8;
const v128_t vxi1x01234567 = wasm_u16x8_load8x8(i1);
i1 += 8;
v128_t vacc01234567 = wasm_i16x8_add(vxi0x01234567, vxi1x01234567);
const v128_t vxi2x01234567 = wasm_u16x8_load8x8(i2);
i2 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi2x01234567);
const v128_t vxi3x01234567 = wasm_u16x8_load8x8(i3);
i3 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi3x01234567);
const v128_t vxi4x01234567 = wasm_u16x8_load8x8(i4);
i4 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi4x01234567);
const v128_t vxi5x01234567 = wasm_u16x8_load8x8(i5);
i5 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi5x01234567);
const v128_t vxi6x01234567 = wasm_u16x8_load8x8(i6);
i6 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi6x01234567);
v128_t vacc0123 = wasm_v128_load(buffer);
v128_t vacc4567 = wasm_v128_load(buffer + 4);
buffer += 8;
vacc0123 = wasm_i32x4_add(vacc0123, wasm_u32x4_extend_low_u16x8(vacc01234567));
vacc4567 = wasm_i32x4_add(vacc4567, wasm_u32x4_extend_high_u16x8(vacc01234567));
vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);
vacc0123 = wasm_f32x4_mul(vacc0123, vscale);
vacc4567 = wasm_f32x4_mul(vacc4567, vscale);
vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);
vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);
vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);
v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
v128_t vout0123456701234567 = wasm_u8x16_narrow_i16x8(vout01234567, vout01234567);
vout0123456701234567 = wasm_u8x16_min(vout0123456701234567, voutput_max);
wasm_v128_store64_lane(output, vout0123456701234567, 0);
output += 8;
}
if XNN_UNLIKELY(channels != 0) {
{
const v128_t vxi0x01234567 = wasm_u16x8_load8x8(i0);
i0 += 8;
const v128_t vxi1x01234567 = wasm_u16x8_load8x8(i1);
i1 += 8;
v128_t vacc01234567 = wasm_i16x8_add(vxi0x01234567, vxi1x01234567);
const v128_t vxi2x01234567 = wasm_u16x8_load8x8(i2);
i2 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi2x01234567);
const v128_t vxi3x01234567 = wasm_u16x8_load8x8(i3);
i3 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi3x01234567);
const v128_t vxi4x01234567 = wasm_u16x8_load8x8(i4);
i4 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi4x01234567);
const v128_t vxi5x01234567 = wasm_u16x8_load8x8(i5);
i5 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi5x01234567);
const v128_t vxi6x01234567 = wasm_u16x8_load8x8(i6);
i6 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi6x01234567);
v128_t vacc0123 = wasm_v128_load(buffer);
v128_t vacc4567 = wasm_v128_load(buffer + 4);
buffer += 8;
vacc0123 = wasm_i32x4_add(vacc0123, wasm_u32x4_extend_low_u16x8(vacc01234567));
vacc4567 = wasm_i32x4_add(vacc4567, wasm_u32x4_extend_high_u16x8(vacc01234567));
vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);
vacc0123 = wasm_f32x4_mul(vacc0123, vscale);
vacc4567 = wasm_f32x4_mul(vacc4567, vscale);
vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);
vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);
vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);
const v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
v128_t vout0123456701234567 = wasm_u8x16_narrow_i16x8(vout01234567, vout01234567);
vout0123456701234567 = wasm_u8x16_min(vout0123456701234567, voutput_max);
if (channels & 4) {
wasm_v128_store32_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u64x2_shr(vout0123456701234567, 32);
output += 4;
}
if (channels & 2) {
wasm_v128_store16_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u32x4_shr(vout0123456701234567, 16);
output += 2;
}
if (channels & 1) {
wasm_v128_store8_lane(output, vout0123456701234567, 0);
}
}
}
}
| 10,432
| 36.394265
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7p7x-minmax-rndnu-neon-c16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
// Multipass global average pooling for QU8 input, averaging 7 rows per pass
// and 16 channels per NEON iteration, with rndnu (rounding) requantization.
//
//   rows         - number of input rows to reduce; must be > 7 (a single-pass
//                  kernel handles rows <= 7, hence the assert below)
//   channels     - number of channels; processed 16 at a time, with the tail
//                  handled in groups of 8 (XNN_OOB_READS: vector loads may
//                  read past the last valid channel)
//   input        - pointer to the first input row
//   input_stride - byte stride between consecutive input rows
//   zero         - pointer to a zero row, substituted for the rows that are
//                  missing in the final (partial) pass
//   buffer       - per-channel int32 accumulator scratch, sized for channels
//                  rounded up to 16
//   output       - one quantized uint8_t per channel
//   params       - rndnu_neon requantization parameters (init bias, shifts,
//                  multiplier, zero point, clamping bounds)
void xnn_qu8_gavgpool_minmax_rndnu_ukernel_7p7x__neon_c16(
    size_t rows,
    size_t channels,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    int32_t* buffer,
    uint8_t* output,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows > 7);
  assert(channels != 0);
  // Seven row pointers for one pass.
  const uint8_t* i0 = input;
  const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
  const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
  const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
  const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
  const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
  const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
  // Advance each pointer by 7 rows per pass; the inner loops already move the
  // pointers past round_up_po2(channels, 16) bytes, so subtract that here.
  const size_t input_increment = 7 * input_stride - round_up_po2(channels, 16) * sizeof(uint8_t);
  const int32x4_t vinit_bias = vld1q_dup_s32(&params->rndnu_neon.init_bias);
  // First pass: sum 7 rows into 16-bit lanes, widen to 32 bits with the init
  // bias, and store the accumulators to the scratch buffer.
  int32_t* b = buffer;
  size_t c = channels;
  // doz(c, 16) is a saturating decrement (difference-or-zero), so the loop
  // terminates even when channels is not a multiple of 16.
  for (; c != 0; c = doz(c, 16)) {
    const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
    const uint8x8_t vi0x89ABCDEF = vld1_u8(i0); i0 += 8;
    const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
    const uint8x8_t vi1x89ABCDEF = vld1_u8(i1); i1 += 8;
    // Loads and widening adds are interleaved to keep the NEON pipeline busy.
    const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
    uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
    const uint8x8_t vi2x89ABCDEF = vld1_u8(i2); i2 += 8;
    uint16x8_t vsum89ABCDEF = vaddl_u8(vi0x89ABCDEF, vi1x89ABCDEF);
    const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
    const uint8x8_t vi3x89ABCDEF = vld1_u8(i3); i3 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi2x89ABCDEF);
    const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
    const uint8x8_t vi4x89ABCDEF = vld1_u8(i4); i4 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi3x89ABCDEF);
    const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
    const uint8x8_t vi5x89ABCDEF = vld1_u8(i5); i5 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi4x89ABCDEF);
    const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
    const uint8x8_t vi6x89ABCDEF = vld1_u8(i6); i6 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi5x89ABCDEF);
    vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi6x89ABCDEF);
    // Widen the 16-bit row sums to 32 bits and add the init bias; the
    // reinterprets let the unsigned widening add target the signed
    // accumulator without changing its bit pattern.
    const int32x4_t vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum01234567)));
    const int32x4_t vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum01234567)));
    const int32x4_t vacc89AB = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum89ABCDEF)));
    const int32x4_t vaccCDEF = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum89ABCDEF)));
    vst1q_s32(b, vacc0123); b += 4;
    vst1q_s32(b, vacc4567); b += 4;
    vst1q_s32(b, vacc89AB); b += 4;
    vst1q_s32(b, vaccCDEF); b += 4;
  }
  // Middle passes: while more than 7 rows remain, sum 7 further rows and add
  // them into the accumulators already stored in the buffer.
  for (rows -= 7; rows > 7; rows -= 7) {
    i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
    i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
    i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
    i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
    i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
    i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
    i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);
    int32_t* b = buffer;
    size_t c = channels;
    for (; c != 0; c = doz(c, 16)) {
      const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
      const uint8x8_t vi0x89ABCDEF = vld1_u8(i0); i0 += 8;
      const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
      const uint8x8_t vi1x89ABCDEF = vld1_u8(i1); i1 += 8;
      const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
      uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
      const uint8x8_t vi2x89ABCDEF = vld1_u8(i2); i2 += 8;
      uint16x8_t vsum89ABCDEF = vaddl_u8(vi0x89ABCDEF, vi1x89ABCDEF);
      const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
      const uint8x8_t vi3x89ABCDEF = vld1_u8(i3); i3 += 8;
      vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi2x89ABCDEF);
      const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
      const uint8x8_t vi4x89ABCDEF = vld1_u8(i4); i4 += 8;
      vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi3x89ABCDEF);
      const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
      const uint8x8_t vi5x89ABCDEF = vld1_u8(i5); i5 += 8;
      vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi4x89ABCDEF);
      const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
      const uint8x8_t vi6x89ABCDEF = vld1_u8(i6); i6 += 8;
      vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi5x89ABCDEF);
      // Reload the running accumulators; loads are interleaved with the last
      // widening adds of this pass.
      int32x4_t vacc0123 = vld1q_s32(b);
      int32x4_t vacc4567 = vld1q_s32(b + 4);
      vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
      int32x4_t vacc89AB = vld1q_s32(b + 8);
      int32x4_t vaccCDEF = vld1q_s32(b + 12);
      vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi6x89ABCDEF);
      vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vsum01234567)));
      vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vsum01234567)));
      vacc89AB = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc89AB), vget_low_u16(vsum89ABCDEF)));
      vaccCDEF = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccCDEF), vget_high_u16(vsum89ABCDEF)));
      vst1q_s32(b, vacc0123); b += 4;
      vst1q_s32(b, vacc4567); b += 4;
      vst1q_s32(b, vacc89AB); b += 4;
      vst1q_s32(b, vaccCDEF); b += 4;
    }
  }
  // Final pass: 1..7 rows remain. Rows beyond the remaining count are pointed
  // at the zero vector so they contribute nothing to the sum.
  i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
  i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
  if XNN_UNPREDICTABLE(rows < 2) {
    i1 = zero;
  }
  i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 2) {
    i2 = zero;
  }
  i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
  if XNN_UNPREDICTABLE(rows < 4) {
    i3 = zero;
  }
  i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 4) {
    i4 = zero;
  }
  i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
  if XNN_UNPREDICTABLE(rows < 6) {
    i5 = zero;
  }
  i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 6) {
    i6 = zero;
  }
  // rndnu requantization constants: saturating pre-shift, doubling-high
  // multiply, rounding post-shift, then zero-point add and u8 clamping.
  const int32x4_t vleft_pre_shift = vld1q_dup_s32(&params->rndnu_neon.left_pre_shift);
  const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
  const int32x4_t vleft_post_shift = vld1q_dup_s32(&params->rndnu_neon.left_post_shift);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
  const uint8x16_t voutput_min = vld1q_dup_u8(&params->rndnu_neon.output_min);
  const uint8x16_t voutput_max = vld1q_dup_u8(&params->rndnu_neon.output_max);
  // Main final-pass loop: accumulate the last rows, requantize, and emit 16
  // output bytes per iteration.
  for (; channels >= 16; channels -= 16) {
    const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
    const uint8x8_t vi0x89ABCDEF = vld1_u8(i0); i0 += 8;
    const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
    const uint8x8_t vi1x89ABCDEF = vld1_u8(i1); i1 += 8;
    const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
    uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
    const uint8x8_t vi2x89ABCDEF = vld1_u8(i2); i2 += 8;
    uint16x8_t vsum89ABCDEF = vaddl_u8(vi0x89ABCDEF, vi1x89ABCDEF);
    const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
    const uint8x8_t vi3x89ABCDEF = vld1_u8(i3); i3 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi2x89ABCDEF);
    const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
    const uint8x8_t vi4x89ABCDEF = vld1_u8(i4); i4 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi3x89ABCDEF);
    const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
    const uint8x8_t vi5x89ABCDEF = vld1_u8(i5); i5 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi4x89ABCDEF);
    const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
    const uint8x8_t vi6x89ABCDEF = vld1_u8(i6); i6 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi5x89ABCDEF);
    int32x4_t vacc0123 = vld1q_s32(buffer); buffer += 4;
    int32x4_t vacc4567 = vld1q_s32(buffer); buffer += 4;
    vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
    int32x4_t vacc89AB = vld1q_s32(buffer); buffer += 4;
    int32x4_t vaccCDEF = vld1q_s32(buffer); buffer += 4;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi6x89ABCDEF);
    vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vsum01234567)));
    vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vsum01234567)));
    vacc89AB = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc89AB), vget_low_u16(vsum89ABCDEF)));
    vaccCDEF = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccCDEF), vget_high_u16(vsum89ABCDEF)));
    // Requantize: saturating left pre-shift...
    vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
    vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
    vacc89AB = vqshlq_s32(vacc89AB, vleft_pre_shift);
    vaccCDEF = vqshlq_s32(vaccCDEF, vleft_pre_shift);
    // ...saturating doubling multiply returning the high half...
    vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
    vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
    vacc89AB = vqdmulhq_s32(vacc89AB, vmultiplier);
    vaccCDEF = vqdmulhq_s32(vaccCDEF, vmultiplier);
    // ...rounding shift (negative post-shift values shift right, rounding).
    vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
    vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
    vacc89AB = vrshlq_s32(vacc89AB, vleft_post_shift);
    vaccCDEF = vrshlq_s32(vaccCDEF, vleft_post_shift);
#if XNN_ARCH_ARM64
    // AArch64 has the fused narrow-high form.
    int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
    int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
#else // !XNN_ARCH_ARM64
    int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
    int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
#endif // !XNN_ARCH_ARM64
    // Add the output zero point with saturation, then narrow to u8.
    vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
    vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
#if XNN_ARCH_ARM64
    uint8x16_t vout0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc01234567), vacc89ABCDEF);
#else // !XNN_ARCH_ARM64
    uint8x16_t vout0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc01234567), vqmovun_s16(vacc89ABCDEF));
#endif // !XNN_ARCH_ARM64
    // Clamp to the configured output range.
    vout0123456789ABCDEF = vmaxq_u8(vout0123456789ABCDEF, voutput_min);
    vout0123456789ABCDEF = vminq_u8(vout0123456789ABCDEF, voutput_max);
    vst1q_u8(output, vout0123456789ABCDEF); output += 16;
  }
  // Channel tail: process remaining channels in groups of 8, then store the
  // final sub-8 remainder lane by lane (4/2/1 bytes).
  if XNN_UNLIKELY(channels != 0) {
    do {
      const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
      const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
      const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
      uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
      const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
      const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
      const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
      const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
      int32x4_t vacc0123 = vld1q_s32(buffer); buffer += 4;
      int32x4_t vacc4567 = vld1q_s32(buffer); buffer += 4;
      vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
      vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vsum01234567)));
      vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vsum01234567)));
      // Same rndnu requantization sequence as the main loop, on 8 channels.
      vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
      vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
      vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
      vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
      vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
      vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
#if XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
      int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
      vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
      uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
      vout01234567 = vmax_u8(vout01234567, vget_low_u8(voutput_min));
      vout01234567 = vmin_u8(vout01234567, vget_low_u8(voutput_max));
      if XNN_LIKELY(channels >= 8) {
        vst1_u8(output, vout01234567); output += 8;
        channels -= 8;
      } else {
        // Fewer than 8 channels left: store 4, 2, then 1 lane(s), rotating
        // the vector so lane 0 always holds the next bytes to store.
        if (channels & 4) {
          vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
          vout01234567 = vext_u8(vout01234567, vout01234567, 4);
        }
        if (channels & 2) {
          vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
          vout01234567 = vext_u8(vout01234567, vout01234567, 2);
        }
        if (channels & 1) {
          vst1_lane_u8(output, vout01234567, 0); output += 1;
        }
        channels = 0;
      }
    } while (channels != 0);
  }
}
| 14,402
| 45.163462
| 128
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7p7x-minmax-rndnu-neon-c24.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
void xnn_qu8_gavgpool_minmax_rndnu_ukernel_7p7x__neon_c24(
size_t rows,
size_t channels,
const uint8_t* input,
size_t input_stride,
const uint8_t* zero,
int32_t* buffer,
uint8_t* output,
const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows > 7);
assert(channels != 0);
const uint8_t* i0 = input;
const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
const size_t input_increment = 7 * input_stride - round_up_po2(channels, 8) * sizeof(uint8_t);
const int32x4_t vinit_bias = vld1q_dup_s32(¶ms->rndnu_neon.init_bias);
int32_t* b = buffer;
size_t c = channels;
for (; c >= 24; c -= 24) {
const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
const uint8x8_t vi0x89ABCDEF = vld1_u8(i0); i0 += 8;
const uint8x8_t vi0xGHIJKLMN = vld1_u8(i0); i0 += 8;
const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
const uint8x8_t vi1x89ABCDEF = vld1_u8(i1); i1 += 8;
const uint8x8_t vi1xGHIJKLMN = vld1_u8(i1); i1 += 8;
const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
const uint8x8_t vi2x89ABCDEF = vld1_u8(i2); i2 += 8;
uint16x8_t vsum89ABCDEF = vaddl_u8(vi0x89ABCDEF, vi1x89ABCDEF);
const uint8x8_t vi2xGHIJKLMN = vld1_u8(i2); i2 += 8;
uint16x8_t vsumGHIJKLMN = vaddl_u8(vi0xGHIJKLMN, vi1xGHIJKLMN);
const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
const uint8x8_t vi3x89ABCDEF = vld1_u8(i3); i3 += 8;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi2x89ABCDEF);
const uint8x8_t vi3xGHIJKLMN = vld1_u8(i3); i3 += 8;
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi2xGHIJKLMN);
const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
const uint8x8_t vi4x89ABCDEF = vld1_u8(i4); i4 += 8;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi3x89ABCDEF);
const uint8x8_t vi4xGHIJKLMN = vld1_u8(i4); i4 += 8;
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi3xGHIJKLMN);
const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
const uint8x8_t vi5x89ABCDEF = vld1_u8(i5); i5 += 8;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi4x89ABCDEF);
const uint8x8_t vi5xGHIJKLMN = vld1_u8(i5); i5 += 8;
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi4xGHIJKLMN);
const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
const uint8x8_t vi6x89ABCDEF = vld1_u8(i6); i6 += 8;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi5x89ABCDEF);
const uint8x8_t vi6xGHIJKLMN = vld1_u8(i6); i6 += 8;
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi5xGHIJKLMN);
vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi6x89ABCDEF);
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi6xGHIJKLMN);
const int32x4_t vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum01234567)));
const int32x4_t vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum01234567)));
const int32x4_t vacc89AB = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum89ABCDEF)));
const int32x4_t vaccCDEF = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum89ABCDEF)));
const int32x4_t vaccGHIJ = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsumGHIJKLMN)));
const int32x4_t vaccKLMN = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsumGHIJKLMN)));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
vst1q_s32(b, vacc89AB); b += 4;
vst1q_s32(b, vaccCDEF); b += 4;
vst1q_s32(b, vaccGHIJ); b += 4;
vst1q_s32(b, vaccKLMN); b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
const int32x4_t vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum01234567)));
const int32x4_t vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum01234567)));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
c = doz(c, 8);
} while (c != 0);
}
for (rows -= 7; rows > 7; rows -= 7) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);
int32_t* b = buffer;
size_t c = channels;
for (; c >= 24; c -= 24) {
const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
const uint8x8_t vi0x89ABCDEF = vld1_u8(i0); i0 += 8;
const uint8x8_t vi0xGHIJKLMN = vld1_u8(i0); i0 += 8;
const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
const uint8x8_t vi1x89ABCDEF = vld1_u8(i1); i1 += 8;
const uint8x8_t vi1xGHIJKLMN = vld1_u8(i1); i1 += 8;
const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
const uint8x8_t vi2x89ABCDEF = vld1_u8(i2); i2 += 8;
uint16x8_t vsum89ABCDEF = vaddl_u8(vi0x89ABCDEF, vi1x89ABCDEF);
const uint8x8_t vi2xGHIJKLMN = vld1_u8(i2); i2 += 8;
uint16x8_t vsumGHIJKLMN = vaddl_u8(vi0xGHIJKLMN, vi1xGHIJKLMN);
const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
const uint8x8_t vi3x89ABCDEF = vld1_u8(i3); i3 += 8;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi2x89ABCDEF);
const uint8x8_t vi3xGHIJKLMN = vld1_u8(i3); i3 += 8;
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi2xGHIJKLMN);
const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
const uint8x8_t vi4x89ABCDEF = vld1_u8(i4); i4 += 8;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi3x89ABCDEF);
const uint8x8_t vi4xGHIJKLMN = vld1_u8(i4); i4 += 8;
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi3xGHIJKLMN);
const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
const uint8x8_t vi5x89ABCDEF = vld1_u8(i5); i5 += 8;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi4x89ABCDEF);
const uint8x8_t vi5xGHIJKLMN = vld1_u8(i5); i5 += 8;
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi4xGHIJKLMN);
const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
const uint8x8_t vi6x89ABCDEF = vld1_u8(i6); i6 += 8;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi5x89ABCDEF);
const uint8x8_t vi6xGHIJKLMN = vld1_u8(i6); i6 += 8;
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi5xGHIJKLMN);
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
int32x4_t vacc89AB = vld1q_s32(b + 8);
int32x4_t vaccCDEF = vld1q_s32(b + 12);
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi6x89ABCDEF);
int32x4_t vaccGHIJ = vld1q_s32(b + 16);
int32x4_t vaccKLMN = vld1q_s32(b + 20);
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi6xGHIJKLMN);
vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vsum01234567)));
vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vsum01234567)));
vacc89AB = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc89AB), vget_low_u16(vsum89ABCDEF)));
vaccCDEF = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccCDEF), vget_high_u16(vsum89ABCDEF)));
vaccGHIJ = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccGHIJ), vget_low_u16(vsumGHIJKLMN)));
vaccKLMN = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccKLMN), vget_high_u16(vsumGHIJKLMN)));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
vst1q_s32(b, vacc89AB); b += 4;
vst1q_s32(b, vaccCDEF); b += 4;
vst1q_s32(b, vaccGHIJ); b += 4;
vst1q_s32(b, vaccKLMN); b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vsum01234567)));
vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vsum01234567)));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
c = doz(c, 8);
} while (c != 0);
}
}
i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const int32x4_t vleft_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.left_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vleft_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.left_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
const uint8x16_t voutput_min = vld1q_dup_u8(¶ms->rndnu_neon.output_min);
const uint8x16_t voutput_max = vld1q_dup_u8(¶ms->rndnu_neon.output_max);
for (; channels >= 24; channels -= 24) {
const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
const uint8x8_t vi0x89ABCDEF = vld1_u8(i0); i0 += 8;
const uint8x8_t vi0xGHIJKLMN = vld1_u8(i0); i0 += 8;
const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
const uint8x8_t vi1x89ABCDEF = vld1_u8(i1); i1 += 8;
const uint8x8_t vi1xGHIJKLMN = vld1_u8(i1); i1 += 8;
const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
const uint8x8_t vi2x89ABCDEF = vld1_u8(i2); i2 += 8;
uint16x8_t vsum89ABCDEF = vaddl_u8(vi0x89ABCDEF, vi1x89ABCDEF);
const uint8x8_t vi2xGHIJKLMN = vld1_u8(i2); i2 += 8;
uint16x8_t vsumGHIJKLMN = vaddl_u8(vi0xGHIJKLMN, vi1xGHIJKLMN);
const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
const uint8x8_t vi3x89ABCDEF = vld1_u8(i3); i3 += 8;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi2x89ABCDEF);
const uint8x8_t vi3xGHIJKLMN = vld1_u8(i3); i3 += 8;
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi2xGHIJKLMN);
const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
const uint8x8_t vi4x89ABCDEF = vld1_u8(i4); i4 += 8;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi3x89ABCDEF);
const uint8x8_t vi4xGHIJKLMN = vld1_u8(i4); i4 += 8;
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi3xGHIJKLMN);
const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
const uint8x8_t vi5x89ABCDEF = vld1_u8(i5); i5 += 8;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi4x89ABCDEF);
const uint8x8_t vi5xGHIJKLMN = vld1_u8(i5); i5 += 8;
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi4xGHIJKLMN);
const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
const uint8x8_t vi6x89ABCDEF = vld1_u8(i6); i6 += 8;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi5x89ABCDEF);
const uint8x8_t vi6xGHIJKLMN = vld1_u8(i6); i6 += 8;
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi5xGHIJKLMN);
int32x4_t vacc0123 = vld1q_s32(buffer); buffer += 4;
int32x4_t vacc4567 = vld1q_s32(buffer); buffer += 4;
vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
int32x4_t vacc89AB = vld1q_s32(buffer); buffer += 4;
int32x4_t vaccCDEF = vld1q_s32(buffer); buffer += 4;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi6x89ABCDEF);
int32x4_t vaccGHIJ = vld1q_s32(buffer); buffer += 4;
int32x4_t vaccKLMN = vld1q_s32(buffer); buffer += 4;
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi6xGHIJKLMN);
vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vsum01234567)));
vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vsum01234567)));
vacc89AB = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc89AB), vget_low_u16(vsum89ABCDEF)));
vaccCDEF = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccCDEF), vget_high_u16(vsum89ABCDEF)));
vaccGHIJ = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccGHIJ), vget_low_u16(vsumGHIJKLMN)));
vaccKLMN = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccKLMN), vget_high_u16(vsumGHIJKLMN)));
vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
vacc89AB = vqshlq_s32(vacc89AB, vleft_pre_shift);
vaccCDEF = vqshlq_s32(vaccCDEF, vleft_pre_shift);
vaccGHIJ = vqshlq_s32(vaccGHIJ, vleft_pre_shift);
vaccKLMN = vqshlq_s32(vaccKLMN, vleft_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc89AB = vqdmulhq_s32(vacc89AB, vmultiplier);
vaccCDEF = vqdmulhq_s32(vaccCDEF, vmultiplier);
vaccGHIJ = vqdmulhq_s32(vaccGHIJ, vmultiplier);
vaccKLMN = vqdmulhq_s32(vaccKLMN, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
vacc89AB = vrshlq_s32(vacc89AB, vleft_post_shift);
vaccCDEF = vrshlq_s32(vaccCDEF, vleft_post_shift);
vaccGHIJ = vrshlq_s32(vaccGHIJ, vleft_post_shift);
vaccKLMN = vrshlq_s32(vaccKLMN, vleft_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
int16x8_t vaccGHIJKLMN = vqmovn_high_s32(vqmovn_s32(vaccGHIJ), vaccKLMN);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
int16x8_t vaccGHIJKLMN = vcombine_s16(vqmovn_s32(vaccGHIJ), vqmovn_s32(vaccKLMN));
#endif // !XNN_ARCH_ARM64
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
vaccGHIJKLMN = vqaddq_s16(vaccGHIJKLMN, voutput_zero_point);
#if XNN_ARCH_ARM64
uint8x16_t vout0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc01234567), vacc89ABCDEF);
uint8x8_t voutGHIJKLMN = vqmovun_s16(vaccGHIJKLMN);
#else // !XNN_ARCH_ARM64
uint8x16_t vout0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc01234567), vqmovun_s16(vacc89ABCDEF));
uint8x8_t voutGHIJKLMN = vqmovun_s16(vaccGHIJKLMN);
#endif // !XNN_ARCH_ARM64
vout0123456789ABCDEF = vmaxq_u8(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMN = vmax_u8(voutGHIJKLMN, vget_low_u8(voutput_min));
vout0123456789ABCDEF = vminq_u8(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMN = vmin_u8(voutGHIJKLMN, vget_low_u8(voutput_max));
vst1q_u8(output, vout0123456789ABCDEF); output += 16;
vst1_u8(output, voutGHIJKLMN); output += 8;
}
if XNN_UNLIKELY(channels != 0) {
do {
const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
int32x4_t vacc0123 = vld1q_s32(buffer); buffer += 4;
int32x4_t vacc4567 = vld1q_s32(buffer); buffer += 4;
vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vsum01234567)));
vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vsum01234567)));
vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
vout01234567 = vmax_u8(vout01234567, vget_low_u8(voutput_min));
vout01234567 = vmin_u8(vout01234567, vget_low_u8(voutput_max));
if XNN_LIKELY(channels >= 8) {
vst1_u8(output, vout01234567); output += 8;
channels -= 8;
} else {
if (channels & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
vout01234567 = vext_u8(vout01234567, vout01234567, 4);
}
if (channels & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
vout01234567 = vext_u8(vout01234567, vout01234567, 2);
}
if (channels & 1) {
vst1_lane_u8(output, vout01234567, 0); output += 1;
}
channels = 0;
}
} while (channels != 0);
}
}
| 21,088
| 47.704388
| 130
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7p7x-minmax-rndnu-neon-c8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
// QU8 global average pooling, multipass (7 rows per pass), 8 channels per
// iteration, NEON, RNDNU requantization.
//
// Three phases:
//   1. First pass: sums rows 0..6 into the int32 scratch `buffer`, seeded
//      with params->rndnu_neon.init_bias.
//   2. Middle passes: each accumulates 7 more rows into `buffer` until at
//      most 7 rows remain.
//   3. Final pass: consumes the remaining rows (pointers for rows past the
//      end are redirected to `zero`), then requantizes: saturating left
//      pre-shift, saturating doubling-high multiply, rounding post-shift,
//      saturating zero-point add, narrow to uint8, clamp to [min, max].
//
// rows          - number of input rows; must be > 7 (<= 7 uses the unipass kernel)
// channels      - number of channels; must be non-zero
// input         - pointer to the first input row
// input_stride  - byte distance between consecutive input rows
// zero          - zero-filled row substituted for out-of-range rows
// buffer        - scratch of round_up_po2(channels, 8) int32 accumulators
// output        - output row of `channels` uint8 values
// params        - quantization parameters (rndnu_neon variant)
void xnn_qu8_gavgpool_minmax_rndnu_ukernel_7p7x__neon_c8(
    size_t rows,
    size_t channels,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    int32_t* buffer,
    uint8_t* output,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows > 7);
  assert(channels != 0);

  const uint8_t* i0 = input;
  const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
  const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
  const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
  const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
  const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
  const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
  // Each pass must advance every row pointer by 7 rows; the channel loop has
  // already advanced them by the padded channel count, hence the subtraction.
  const size_t input_increment = 7 * input_stride - round_up_po2(channels, 8) * sizeof(uint8_t);

  const int32x4_t vinit_bias = vld1q_dup_s32(&params->rndnu_neon.init_bias);
  // First pass: accumulate rows 0..6 into the scratch buffer.
  int32_t* b = buffer;
  size_t c = channels;
  for (; c != 0; c = doz(c, 8)) {
    // Loads are interleaved with widening adds to hide load latency.
    const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
    const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;

    const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
    uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);

    const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
    const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
    const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
    const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
    vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);

    // Widen the uint16 row sums to int32, seeded with init_bias.
    const int32x4_t vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum01234567)));
    const int32x4_t vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum01234567)));

    vst1q_s32(b, vacc0123); b += 4;
    vst1q_s32(b, vacc4567); b += 4;
  }

  // Middle passes: fold 7 more rows per pass into the scratch buffer.
  for (rows -= 7; rows > 7; rows -= 7) {
    i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
    i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
    i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
    i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
    i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
    i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
    i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);

    int32_t* b = buffer;
    size_t c = channels;
    for (; c != 0; c = doz(c, 8)) {
      const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
      const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;

      const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
      uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);

      const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
      const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
      const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
      const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
      // Read back the running accumulators before folding in the last row.
      int32x4_t vacc0123 = vld1q_s32(b);
      int32x4_t vacc4567 = vld1q_s32(b + 4);
      vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);

      vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vsum01234567)));
      vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vsum01234567)));

      vst1q_s32(b, vacc0123); b += 4;
      vst1q_s32(b, vacc4567); b += 4;
    }
  }

  // Final pass: at most 7 rows remain; out-of-range row pointers read from
  // the zero row so the unconditional 7-way sum stays correct.
  i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
  i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
  if XNN_UNPREDICTABLE(rows < 2) {
    i1 = zero;
  }
  i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 2) {
    i2 = zero;
  }
  i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
  if XNN_UNPREDICTABLE(rows < 4) {
    i3 = zero;
  }
  i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 4) {
    i4 = zero;
  }
  i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
  if XNN_UNPREDICTABLE(rows < 6) {
    i5 = zero;
  }
  i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 6) {
    i6 = zero;
  }

  // RNDNU requantization constants.
  const int32x4_t vleft_pre_shift = vld1q_dup_s32(&params->rndnu_neon.left_pre_shift);
  const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
  const int32x4_t vleft_post_shift = vld1q_dup_s32(&params->rndnu_neon.left_post_shift);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
  const uint8x8_t voutput_min = vld1_dup_u8(&params->rndnu_neon.output_min);
  const uint8x8_t voutput_max = vld1_dup_u8(&params->rndnu_neon.output_max);
  for (; channels >= 8; channels -= 8) {
    const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
    const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;

    const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
    uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);

    const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
    const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
    const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
    const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
    int32x4_t vacc0123 = vld1q_s32(buffer); buffer += 4;
    int32x4_t vacc4567 = vld1q_s32(buffer); buffer += 4;
    vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);

    vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vsum01234567)));
    vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vsum01234567)));

    // Requantize: pre-shift, doubling-high multiply, rounding post-shift.
    vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
    vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);

    vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
    vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);

    vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
    vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);

#if XNN_ARCH_ARM64
    int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else  // !XNN_ARCH_ARM64
    int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif  // !XNN_ARCH_ARM64

    vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);

    // Narrowing to uint8 is identical on both architectures: no #if needed.
    uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);

    vout01234567 = vmax_u8(vout01234567, voutput_min);

    vout01234567 = vmin_u8(vout01234567, voutput_max);

    vst1_u8(output, vout01234567); output += 8;
  }
  if XNN_UNLIKELY(channels != 0) {
    // Remainder: 1-7 channels; loads may over-read (XNN_OOB_READS) and the
    // store is decomposed into 4-/2-/1-byte pieces.
    {
      const uint8x8_t vi0x01234567 = vld1_u8(i0);
      const uint8x8_t vi1x01234567 = vld1_u8(i1);

      const uint8x8_t vi2x01234567 = vld1_u8(i2);
      uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);

      const uint8x8_t vi3x01234567 = vld1_u8(i3);
      vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
      const uint8x8_t vi4x01234567 = vld1_u8(i4);
      vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
      const uint8x8_t vi5x01234567 = vld1_u8(i5);
      vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
      const uint8x8_t vi6x01234567 = vld1_u8(i6);
      vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
      int32x4_t vacc0123 = vld1q_s32(buffer); buffer += 4;
      int32x4_t vacc4567 = vld1q_s32(buffer); buffer += 4;
      vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);

      vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vsum01234567)));
      vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vsum01234567)));

      vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
      vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);

      vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
      vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);

      vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
      vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);

#if XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
      int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
      vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);

      uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
      vout01234567 = vmax_u8(vout01234567, voutput_min);
      vout01234567 = vmin_u8(vout01234567, voutput_max);

      if (channels & 4) {
        vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
        vout01234567 = vext_u8(vout01234567, vout01234567, 4);
      }
      if (channels & 2) {
        vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
        vout01234567 = vext_u8(vout01234567, vout01234567, 2);
      }
      if (channels & 1) {
        vst1_lane_u8(output, vout01234567, 0);
      }
    }
  }
}
| 10,057
| 40.221311
| 128
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7x-minmax-fp32-neon-c16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gavgpool.h>
// QU8 global average pooling, unipass (up to 7 rows), 16 channels per
// iteration, NEON, FP32 requantization via the magic-bias rounding trick.
//
// rows          - number of input rows; must be in [1, 7]
// channels      - number of channels; must be non-zero
// input         - pointer to the first input row
// input_stride  - byte distance between consecutive input rows
// zero          - zero-filled row substituted for rows past `rows`
// output        - output row of `channels` uint8 values
// params        - quantization parameters (fp32_neon variant)
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__neon_c16(
    size_t rows,
    size_t channels,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    uint8_t* output,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows != 0);
  assert(rows <= 7);
  assert(channels != 0);

  // Set up 7 row pointers; rows beyond `rows` read from the zero row so the
  // unconditional 7-way sum below stays correct.
  const uint8_t* i0 = input;
  const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
  if XNN_UNPREDICTABLE(rows < 2) {
    i1 = zero;
  }
  const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 2) {
    i2 = zero;
  }
  const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
  if XNN_UNPREDICTABLE(rows < 4) {
    i3 = zero;
  }
  const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 4) {
    i4 = zero;
  }
  const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
  if XNN_UNPREDICTABLE(rows < 6) {
    i5 = zero;
  }
  const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 6) {
    i6 = zero;
  }

  // FP32 requantization constants: int32 sums are converted to float, scaled,
  // rounded by adding a magic bias, and rebiased to the output zero point by
  // a saturating integer subtract of magic_bias_less_output_zero_point.
  const int32x4_t vinit_bias = vld1q_dup_s32(&params->fp32_neon.init_bias);
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
  const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
  const uint8x16_t voutput_min = vld1q_dup_u8(&params->fp32_neon.output_min);
  const uint8x16_t voutput_max = vld1q_dup_u8(&params->fp32_neon.output_max);
  // Main loop: 16 channels per iteration; loads are interleaved with the
  // widening adds to hide load latency.
  for (; channels >= 16; channels -= 16) {
    const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
    const uint8x8_t vi0x89ABCDEF = vld1_u8(i0); i0 += 8;
    const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
    const uint8x8_t vi1x89ABCDEF = vld1_u8(i1); i1 += 8;

    const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
    uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
    const uint8x8_t vi2x89ABCDEF = vld1_u8(i2); i2 += 8;
    uint16x8_t vsum89ABCDEF = vaddl_u8(vi0x89ABCDEF, vi1x89ABCDEF);

    const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
    const uint8x8_t vi3x89ABCDEF = vld1_u8(i3); i3 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi2x89ABCDEF);
    const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
    const uint8x8_t vi4x89ABCDEF = vld1_u8(i4); i4 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi3x89ABCDEF);
    const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
    const uint8x8_t vi5x89ABCDEF = vld1_u8(i5); i5 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi4x89ABCDEF);
    const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
    const uint8x8_t vi6x89ABCDEF = vld1_u8(i6); i6 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi5x89ABCDEF);
    vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi6x89ABCDEF);

    // Widen the uint16 row sums to int32, seeded with init_bias.
    int32x4_t vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum01234567)));
    int32x4_t vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum01234567)));
    int32x4_t vacc89AB = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum89ABCDEF)));
    int32x4_t vaccCDEF = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum89ABCDEF)));

    float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
    float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
    float32x4_t vfpacc89AB = vcvtq_f32_s32(vacc89AB);
    float32x4_t vfpaccCDEF = vcvtq_f32_s32(vaccCDEF);

    vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
    vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
    vfpacc89AB = vmulq_f32(vfpacc89AB, vscale);
    vfpaccCDEF = vmulq_f32(vfpaccCDEF, vscale);

    // Magic-bias rounding: the float add leaves the rounded integer in the
    // low mantissa bits; reinterpret as int32 and subtract the bias.
    vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias))
| 8,674
| 42.375
| 124
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7x-minmax-fp32-neon-c24.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gavgpool.h>
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__neon_c24(
size_t rows,
size_t channels,
const uint8_t* input,
size_t input_stride,
const uint8_t* zero,
uint8_t* output,
const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
const uint8_t* i0 = input;
const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const int32x4_t vinit_bias = vld1q_dup_s32(¶ms->fp32_neon.init_bias);
const float32x4_t vscale = vld1q_dup_f32(¶ms->fp32_neon.scale);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->fp32_neon.magic_bias);
const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(¶ms->fp32_neon.magic_bias_less_output_zero_point);
const uint8x16_t voutput_min = vld1q_dup_u8(¶ms->fp32_neon.output_min);
const uint8x16_t voutput_max = vld1q_dup_u8(¶ms->fp32_neon.output_max);
for (; channels >= 24; channels -= 24) {
const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
const uint8x8_t vi0x89ABCDEF = vld1_u8(i0); i0 += 8;
const uint8x8_t vi0xGHIJKLMN = vld1_u8(i0); i0 += 8;
const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
const uint8x8_t vi1x89ABCDEF = vld1_u8(i1); i1 += 8;
const uint8x8_t vi1xGHIJKLMN = vld1_u8(i1); i1 += 8;
const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
const uint8x8_t vi2x89ABCDEF = vld1_u8(i2); i2 += 8;
uint16x8_t vsum89ABCDEF = vaddl_u8(vi0x89ABCDEF, vi1x89ABCDEF);
const uint8x8_t vi2xGHIJKLMN = vld1_u8(i2); i2 += 8;
uint16x8_t vsumGHIJKLMN = vaddl_u8(vi0xGHIJKLMN, vi1xGHIJKLMN);
const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
const uint8x8_t vi3x89ABCDEF = vld1_u8(i3); i3 += 8;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi2x89ABCDEF);
const uint8x8_t vi3xGHIJKLMN = vld1_u8(i3); i3 += 8;
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi2xGHIJKLMN);
const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
const uint8x8_t vi4x89ABCDEF = vld1_u8(i4); i4 += 8;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi3x89ABCDEF);
const uint8x8_t vi4xGHIJKLMN = vld1_u8(i4); i4 += 8;
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi3xGHIJKLMN);
const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
const uint8x8_t vi5x89ABCDEF = vld1_u8(i5); i5 += 8;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi4x89ABCDEF);
const uint8x8_t vi5xGHIJKLMN = vld1_u8(i5); i5 += 8;
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi4xGHIJKLMN);
const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
const uint8x8_t vi6x89ABCDEF = vld1_u8(i6); i6 += 8;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi5x89ABCDEF);
const uint8x8_t vi6xGHIJKLMN = vld1_u8(i6); i6 += 8;
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi5xGHIJKLMN);
vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi6x89ABCDEF);
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi6xGHIJKLMN);
int32x4_t vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum01234567)));
int32x4_t vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum01234567)));
int32x4_t vacc89AB = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum89ABCDEF)));
int32x4_t vaccCDEF = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum89ABCDEF)));
int32x4_t vaccGHIJ = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsumGHIJKLMN)));
int32x4_t vaccKLMN = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsumGHIJKLMN)));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
float32x4_t vfpacc89AB = vcvtq_f32_s32(vacc89AB);
float32x4_t vfpaccCDEF = vcvtq_f32_s32(vaccCDEF);
float32x4_t vfpaccGHIJ = vcvtq_f32_s32(vaccGHIJ);
float32x4_t vfpaccKLMN = vcvtq_f32_s32(vaccKLMN);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vfpacc89AB = vmulq_f32(vfpacc89AB, vscale);
vfpaccCDEF = vmulq_f32(vfpaccCDEF, vscale);
vfpaccGHIJ = vmulq_f32(vfpaccGHIJ, vscale);
vfpaccKLMN = vmulq_f32(vfpaccKLMN, vscale);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc89AB = vreinterpretq_s32_f32(vaddq_f32(vfpacc89AB, vmagic_bias));
vaccCDEF = vreinterpretq_s32_f32(vaddq_f32(vfpaccCDEF, vmagic_bias));
vaccGHIJ = vreinterpretq_s32_f32(vaddq_f32(vfpaccGHIJ, vmagic_bias));
vaccKLMN = vreinterpretq_s32_f32(vaddq_f32(vfpaccKLMN, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
vacc89AB = vqsubq_s32(vacc89AB, vmagic_bias_less_output_zero_point);
vaccCDEF = vqsubq_s32(vaccCDEF, vmagic_bias_less_output_zero_point);
vaccGHIJ = vqsubq_s32(vaccGHIJ, vmagic_bias_less_output_zero_point);
vaccKLMN = vqsubq_s32(vaccKLMN, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
int16x8_t vaccGHIJKLMN = vqmovn_high_s32(vqmovn_s32(vaccGHIJ), vaccKLMN);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
int16x8_t vaccGHIJKLMN = vcombine_s16(vqmovn_s32(vaccGHIJ), vqmovn_s32(vaccKLMN));
#endif // !XNN_ARCH_ARM64
#if XNN_ARCH_ARM64
uint8x16_t vout0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc01234567), vacc89ABCDEF);
uint8x8_t voutGHIJKLMN = vqmovun_s16(vaccGHIJKLMN);
#else // !XNN_ARCH_ARM64
uint8x16_t vout0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc01234567), vqmovun_s16(vacc89ABCDEF));
uint8x8_t voutGHIJKLMN = vqmovun_s16(vaccGHIJKLMN);
#endif // !XNN_ARCH_ARM64
vout0123456789ABCDEF = vmaxq_u8(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMN = vmax_u8(voutGHIJKLMN, vget_low_u8(voutput_min));
vout0123456789ABCDEF = vminq_u8(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMN = vmin_u8(voutGHIJKLMN, vget_low_u8(voutput_max));
vst1q_u8(output, vout0123456789ABCDEF); output += 16;
vst1_u8(output, voutGHIJKLMN); output += 8;
}
if XNN_UNLIKELY(channels != 0) {
do {
const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
int32x4_t vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum01234567)));
int32x4_t vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum01234567)));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
vout01234567 = vmax_u8(vout01234567, vget_low_u8(voutput_min));
vout01234567 = vmin_u8(vout01234567, vget_low_u8(voutput_max));
if XNN_LIKELY(channels >= 8) {
vst1_u8(output, vout01234567); output += 8;
channels -= 8;
} else {
if (channels & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
vout01234567 = vext_u8(vout01234567, vout01234567, 4);
}
if (channels & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
vout01234567 = vext_u8(vout01234567, vout01234567, 2);
}
if (channels & 1) {
vst1_lane_u8(output, vout01234567, 0); output += 1;
}
channels = 0;
}
} while (channels != 0);
}
}
| 10,638
| 45.256522
| 124
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7x-minmax-fp32-neon-c32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gavgpool.h>
// Unipass QU8 global-average-pooling microkernel with fp32 ("magic bias")
// requantization, NEON variant, processing up to 7 input rows at once and
// 32 channels per main-loop iteration.
//
//   rows         - number of valid input rows (1..7; asserted below)
//   channels     - number of channels to reduce; also the output length
//   input        - base pointer of the first input row
//   input_stride - byte stride between consecutive input rows
//   zero         - pointer to a zero-filled buffer substituted for missing rows
//   output       - destination for the `channels` quantized averages
//   params       - requantization constants (init bias, scale, magic bias,
//                  output zero point folded into magic_bias_less_output_zero_point,
//                  output min/max clamps)
//
// NOTE(review): XNN_OOB_READS on the signature marks that the remainder path
// loads full 8-byte vectors even when fewer than 8 channels remain — callers
// must guarantee the over-read is safe.
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__neon_c32(
    size_t rows,
    size_t channels,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    uint8_t* output,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows != 0);
  assert(rows <= 7);
  assert(channels != 0);

  // Set up the 7 row pointers. Rows beyond `rows` are redirected to the
  // zero buffer so they contribute nothing to the accumulated sum.
  const uint8_t* i0 = input;
  const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
  if XNN_UNPREDICTABLE(rows < 2) {
    i1 = zero;
  }
  const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 2) {
    i2 = zero;
  }
  const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
  if XNN_UNPREDICTABLE(rows < 4) {
    i3 = zero;
  }
  const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 4) {
    i4 = zero;
  }
  const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
  if XNN_UNPREDICTABLE(rows < 6) {
    i5 = zero;
  }
  const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 6) {
    i6 = zero;
  }
  // Broadcast the requantization constants into vector registers once.
  const int32x4_t vinit_bias = vld1q_dup_s32(&params->fp32_neon.init_bias);
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
  const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
  const uint8x16_t voutput_min = vld1q_dup_u8(&params->fp32_neon.output_min);
  const uint8x16_t voutput_max = vld1q_dup_u8(&params->fp32_neon.output_max);
  // Main loop: 32 channels per iteration, tracked as four 8-lane groups
  // (01234567, 89ABCDEF, GHIJKLMN, OPQRSTUV). Loads are interleaved with
  // the widening adds to hide load latency.
  for (; channels >= 32; channels -= 32) {
    const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
    const uint8x8_t vi0x89ABCDEF = vld1_u8(i0); i0 += 8;
    const uint8x8_t vi0xGHIJKLMN = vld1_u8(i0); i0 += 8;
    const uint8x8_t vi0xOPQRSTUV = vld1_u8(i0); i0 += 8;
    const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
    const uint8x8_t vi1x89ABCDEF = vld1_u8(i1); i1 += 8;
    const uint8x8_t vi1xGHIJKLMN = vld1_u8(i1); i1 += 8;
    const uint8x8_t vi1xOPQRSTUV = vld1_u8(i1); i1 += 8;
    // Sum the 7 rows into 16-bit accumulators; 7 * 255 fits in uint16.
    const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
    uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
    const uint8x8_t vi2x89ABCDEF = vld1_u8(i2); i2 += 8;
    uint16x8_t vsum89ABCDEF = vaddl_u8(vi0x89ABCDEF, vi1x89ABCDEF);
    const uint8x8_t vi2xGHIJKLMN = vld1_u8(i2); i2 += 8;
    uint16x8_t vsumGHIJKLMN = vaddl_u8(vi0xGHIJKLMN, vi1xGHIJKLMN);
    const uint8x8_t vi2xOPQRSTUV = vld1_u8(i2); i2 += 8;
    uint16x8_t vsumOPQRSTUV = vaddl_u8(vi0xOPQRSTUV, vi1xOPQRSTUV);
    const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
    const uint8x8_t vi3x89ABCDEF = vld1_u8(i3); i3 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi2x89ABCDEF);
    const uint8x8_t vi3xGHIJKLMN = vld1_u8(i3); i3 += 8;
    vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi2xGHIJKLMN);
    const uint8x8_t vi3xOPQRSTUV = vld1_u8(i3); i3 += 8;
    vsumOPQRSTUV = vaddw_u8(vsumOPQRSTUV, vi2xOPQRSTUV);
    const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
    const uint8x8_t vi4x89ABCDEF = vld1_u8(i4); i4 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi3x89ABCDEF);
    const uint8x8_t vi4xGHIJKLMN = vld1_u8(i4); i4 += 8;
    vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi3xGHIJKLMN);
    const uint8x8_t vi4xOPQRSTUV = vld1_u8(i4); i4 += 8;
    vsumOPQRSTUV = vaddw_u8(vsumOPQRSTUV, vi3xOPQRSTUV);
    const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
    const uint8x8_t vi5x89ABCDEF = vld1_u8(i5); i5 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi4x89ABCDEF);
    const uint8x8_t vi5xGHIJKLMN = vld1_u8(i5); i5 += 8;
    vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi4xGHIJKLMN);
    const uint8x8_t vi5xOPQRSTUV = vld1_u8(i5); i5 += 8;
    vsumOPQRSTUV = vaddw_u8(vsumOPQRSTUV, vi4xOPQRSTUV);
    const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
    const uint8x8_t vi6x89ABCDEF = vld1_u8(i6); i6 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi5x89ABCDEF);
    const uint8x8_t vi6xGHIJKLMN = vld1_u8(i6); i6 += 8;
    vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi5xGHIJKLMN);
    const uint8x8_t vi6xOPQRSTUV = vld1_u8(i6); i6 += 8;
    vsumOPQRSTUV = vaddw_u8(vsumOPQRSTUV, vi5xOPQRSTUV);
    vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi6x89ABCDEF);
    vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi6xGHIJKLMN);
    vsumOPQRSTUV = vaddw_u8(vsumOPQRSTUV, vi6xOPQRSTUV);
    // Widen the 16-bit sums to 32 bits and add the (signed) init bias; the
    // add is done in the unsigned domain via reinterpret casts.
    int32x4_t vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum01234567)));
    int32x4_t vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum01234567)));
    int32x4_t vacc89AB = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum89ABCDEF)));
    int32x4_t vaccCDEF = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum89ABCDEF)));
    int32x4_t vaccGHIJ = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsumGHIJKLMN)));
    int32x4_t vaccKLMN = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsumGHIJKLMN)));
    int32x4_t vaccOPQR = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsumOPQRSTUV)));
    int32x4_t vaccSTUV = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsumOPQRSTUV)));
    // fp32 requantization: convert to float and multiply by the scale.
    float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
    float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
    float32x4_t vfpacc89AB = vcvtq_f32_s32(vacc89AB);
    float32x4_t vfpaccCDEF = vcvtq_f32_s32(vaccCDEF);
    float32x4_t vfpaccGHIJ = vcvtq_f32_s32(vaccGHIJ);
    float32x4_t vfpaccKLMN = vcvtq_f32_s32(vaccKLMN);
    float32x4_t vfpaccOPQR = vcvtq_f32_s32(vaccOPQR);
    float32x4_t vfpaccSTUV = vcvtq_f32_s32(vaccSTUV);
    vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
    vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
    vfpacc89AB = vmulq_f32(vfpacc89AB, vscale);
    vfpaccCDEF = vmulq_f32(vfpaccCDEF, vscale);
    vfpaccGHIJ = vmulq_f32(vfpaccGHIJ, vscale);
    vfpaccKLMN = vmulq_f32(vfpaccKLMN, vscale);
    vfpaccOPQR = vmulq_f32(vfpaccOPQR, vscale);
    vfpaccSTUV = vmulq_f32(vfpaccSTUV, vscale);
    // "Magic bias" trick: adding the bias aligns the mantissa so that the
    // rounded integer result can be read from the float's bit pattern.
    vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
    vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
    vacc89AB = vreinterpretq_s32_f32(vaddq_f32(vfpacc89AB, vmagic_bias));
    vaccCDEF = vreinterpretq_s32_f32(vaddq_f32(vfpaccCDEF, vmagic_bias));
    vaccGHIJ = vreinterpretq_s32_f32(vaddq_f32(vfpaccGHIJ, vmagic_bias));
    vaccKLMN = vreinterpretq_s32_f32(vaddq_f32(vfpaccKLMN, vmagic_bias));
    vaccOPQR = vreinterpretq_s32_f32(vaddq_f32(vfpaccOPQR, vmagic_bias));
    vaccSTUV = vreinterpretq_s32_f32(vaddq_f32(vfpaccSTUV, vmagic_bias));
    // Remove the magic-bias bits and fold in the output zero point in one
    // saturating subtraction.
    vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
    vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
    vacc89AB = vqsubq_s32(vacc89AB, vmagic_bias_less_output_zero_point);
    vaccCDEF = vqsubq_s32(vaccCDEF, vmagic_bias_less_output_zero_point);
    vaccGHIJ = vqsubq_s32(vaccGHIJ, vmagic_bias_less_output_zero_point);
    vaccKLMN = vqsubq_s32(vaccKLMN, vmagic_bias_less_output_zero_point);
    vaccOPQR = vqsubq_s32(vaccOPQR, vmagic_bias_less_output_zero_point);
    vaccSTUV = vqsubq_s32(vaccSTUV, vmagic_bias_less_output_zero_point);
    // Saturating narrow 32 -> 16 bits (AArch64 has fused high-half narrowing).
    #if XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
      int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
      int16x8_t vaccGHIJKLMN = vqmovn_high_s32(vqmovn_s32(vaccGHIJ), vaccKLMN);
      int16x8_t vaccOPQRSTUV = vqmovn_high_s32(vqmovn_s32(vaccOPQR), vaccSTUV);
    #else // !XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
      int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
      int16x8_t vaccGHIJKLMN = vcombine_s16(vqmovn_s32(vaccGHIJ), vqmovn_s32(vaccKLMN));
      int16x8_t vaccOPQRSTUV = vcombine_s16(vqmovn_s32(vaccOPQR), vqmovn_s32(vaccSTUV));
    #endif // !XNN_ARCH_ARM64
    // Saturating narrow 16 -> unsigned 8 bits.
    #if XNN_ARCH_ARM64
      uint8x16_t vout0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc01234567), vacc89ABCDEF);
      uint8x16_t voutGHIJKLMNOPQRSTUV = vqmovun_high_s16(vqmovun_s16(vaccGHIJKLMN), vaccOPQRSTUV);
    #else // !XNN_ARCH_ARM64
      uint8x16_t vout0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc01234567), vqmovun_s16(vacc89ABCDEF));
      uint8x16_t voutGHIJKLMNOPQRSTUV = vcombine_u8(vqmovun_s16(vaccGHIJKLMN), vqmovun_s16(vaccOPQRSTUV));
    #endif // !XNN_ARCH_ARM64
    // Clamp to [output_min, output_max] and store 32 output bytes.
    vout0123456789ABCDEF = vmaxq_u8(vout0123456789ABCDEF, voutput_min);
    voutGHIJKLMNOPQRSTUV = vmaxq_u8(voutGHIJKLMNOPQRSTUV, voutput_min);
    vout0123456789ABCDEF = vminq_u8(vout0123456789ABCDEF, voutput_max);
    voutGHIJKLMNOPQRSTUV = vminq_u8(voutGHIJKLMNOPQRSTUV, voutput_max);
    vst1q_u8(output, vout0123456789ABCDEF); output += 16;
    vst1q_u8(output, voutGHIJKLMNOPQRSTUV); output += 16;
  }
  // Remainder: process the final 1..31 channels in groups of up to 8,
  // with a partial (4/2/1-byte) store for the last sub-8 group.
  if XNN_UNLIKELY(channels != 0) {
    do {
      const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
      const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
      const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
      uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
      const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
      const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
      const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
      const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
      vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
      int32x4_t vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum01234567)));
      int32x4_t vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum01234567)));
      float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
      float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
      vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
      vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
      vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
      vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
      vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
      vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
      #if XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
      #else
        int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
      #endif
      uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
      vout01234567 = vmax_u8(vout01234567, vget_low_u8(voutput_min));
      vout01234567 = vmin_u8(vout01234567, vget_low_u8(voutput_max));
      if XNN_LIKELY(channels >= 8) {
        vst1_u8(output, vout01234567); output += 8;
        channels -= 8;
      } else {
        // Store 4, 2, then 1 byte(s) as needed, shifting consumed lanes out.
        if (channels & 4) {
          vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
          vout01234567 = vext_u8(vout01234567, vout01234567, 4);
        }
        if (channels & 2) {
          vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
          vout01234567 = vext_u8(vout01234567, vout01234567, 2);
        }
        if (channels & 1) {
          vst1_lane_u8(output, vout01234567, 0); output += 1;
        }
        channels = 0;
      }
    } while (channels != 0);
  }
}
| 12,410
| 47.670588
| 124
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7x-minmax-fp32-neon-c8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gavgpool.h>
// Unipass QU8 global-average-pooling microkernel with fp32 ("magic bias")
// requantization, NEON variant, processing up to 7 input rows at once and
// 8 channels per loop iteration.
//
//   rows         - number of valid input rows (1..7; asserted below)
//   channels     - number of channels to reduce; also the output length
//   input        - base pointer of the first input row
//   input_stride - byte stride between consecutive input rows
//   zero         - pointer to a zero-filled buffer substituted for missing rows
//   output       - destination for the `channels` quantized averages
//   params       - requantization constants (init bias, scale, magic bias,
//                  output zero point folded into magic_bias_less_output_zero_point,
//                  output min/max clamps)
//
// Fix vs. generated original: the `#if XNN_ARCH_ARM64` / `#else` pair around
// the 16->8-bit narrowing had byte-identical branches
// (`vout01234567 = vqmovun_s16(...)` in both); the redundant conditional is
// collapsed into a single unconditional statement. Behavior is unchanged on
// every architecture.
//
// NOTE(review): XNN_OOB_READS on the signature marks that the remainder path
// loads full 8-byte vectors even when fewer than 8 channels remain — callers
// must guarantee the over-read is safe.
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__neon_c8(
    size_t rows,
    size_t channels,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    uint8_t* output,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows != 0);
  assert(rows <= 7);
  assert(channels != 0);

  // Set up the 7 row pointers. Rows beyond `rows` are redirected to the
  // zero buffer so they contribute nothing to the accumulated sum.
  const uint8_t* i0 = input;
  const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
  if XNN_UNPREDICTABLE(rows < 2) {
    i1 = zero;
  }
  const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 2) {
    i2 = zero;
  }
  const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
  if XNN_UNPREDICTABLE(rows < 4) {
    i3 = zero;
  }
  const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 4) {
    i4 = zero;
  }
  const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
  if XNN_UNPREDICTABLE(rows < 6) {
    i5 = zero;
  }
  const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 6) {
    i6 = zero;
  }
  // Broadcast the requantization constants into vector registers once.
  const int32x4_t vinit_bias = vld1q_dup_s32(&params->fp32_neon.init_bias);
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
  const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
  const uint8x8_t voutput_min = vld1_dup_u8(&params->fp32_neon.output_min);
  const uint8x8_t voutput_max = vld1_dup_u8(&params->fp32_neon.output_max);
  // Main loop: 8 channels per iteration.
  for (; channels >= 8; channels -= 8) {
    // Sum the 7 rows into a 16-bit accumulator; 7 * 255 fits in uint16.
    const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
    const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
    const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
    uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
    const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
    const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
    const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
    const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
    vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
    // Widen to 32 bits, add the init bias, then requantize in fp32 using the
    // magic-bias round-to-nearest trick.
    int32x4_t vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum01234567)));
    int32x4_t vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum01234567)));
    float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
    float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
    vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
    vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
    vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
    vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
    vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
    vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
    // Saturating narrow 32 -> 16 bits (AArch64 has fused high-half narrowing).
    #if XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
    #else // !XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
    #endif // !XNN_ARCH_ARM64
    // Saturating narrow 16 -> unsigned 8 bits (same instruction on all arches;
    // the generated #if/#else with identical branches was collapsed here).
    uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
    // Clamp to [output_min, output_max] and store 8 output bytes.
    vout01234567 = vmax_u8(vout01234567, voutput_min);
    vout01234567 = vmin_u8(vout01234567, voutput_max);
    vst1_u8(output, vout01234567); output += 8;
  }
  // Remainder: at most one partial group of 1..7 channels, stored as
  // 4-, 2-, and 1-byte pieces.
  if XNN_UNLIKELY(channels != 0) {
    {
      const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
      const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
      const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
      uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
      const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
      const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
      const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
      const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
      vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
      int32x4_t vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum01234567)));
      int32x4_t vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum01234567)));
      float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
      float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
      vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
      vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
      vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
      vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
      vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
      vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
      #if XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
      #else
        int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
      #endif
      uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
      vout01234567 = vmax_u8(vout01234567, voutput_min);
      vout01234567 = vmin_u8(vout01234567, voutput_max);
      if (channels & 4) {
        vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
        vout01234567 = vext_u8(vout01234567, vout01234567, 4);
      }
      if (channels & 2) {
        vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
        vout01234567 = vext_u8(vout01234567, vout01234567, 2);
      }
      if (channels & 1) {
        vst1_lane_u8(output, vout01234567, 0);
      }
    }
  }
}
| 6,625
| 38.207101
| 124
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7x-minmax-fp32-neonv8-c16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/intrinsics-polyfill.h>
// Unipass QU8 global-average-pooling microkernel with fp32 requantization,
// NEONv8 variant: uses the ARMv8 native round-to-nearest conversion
// (vcvtnq_s32_f32) instead of the magic-bias trick, and adds the output zero
// point in the 16-bit domain. Processes up to 7 input rows at once and
// 16 channels per main-loop iteration.
//
//   rows         - number of valid input rows (1..7; asserted below)
//   channels     - number of channels to reduce; also the output length
//   input        - base pointer of the first input row
//   input_stride - byte stride between consecutive input rows
//   zero         - pointer to a zero-filled buffer substituted for missing rows
//   output       - destination for the `channels` quantized averages
//   params       - requantization constants (init bias, scale, output zero
//                  point, output min/max clamps)
//
// NOTE(review): XNN_OOB_READS on the signature marks that the remainder path
// loads full 8-byte vectors even when fewer than 8 channels remain — callers
// must guarantee the over-read is safe.
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__neonv8_c16(
    size_t rows,
    size_t channels,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    uint8_t* output,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows != 0);
  assert(rows <= 7);
  assert(channels != 0);

  // Set up the 7 row pointers. Rows beyond `rows` are redirected to the
  // zero buffer so they contribute nothing to the accumulated sum.
  const uint8_t* i0 = input;
  const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
  if XNN_UNPREDICTABLE(rows < 2) {
    i1 = zero;
  }
  const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 2) {
    i2 = zero;
  }
  const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
  if XNN_UNPREDICTABLE(rows < 4) {
    i3 = zero;
  }
  const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 4) {
    i4 = zero;
  }
  const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
  if XNN_UNPREDICTABLE(rows < 6) {
    i5 = zero;
  }
  const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 6) {
    i6 = zero;
  }
  // Broadcast the requantization constants into vector registers once.
  const int32x4_t vinit_bias = vld1q_dup_s32(&params->fp32_neonv8.init_bias);
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
  const uint8x16_t voutput_min = vld1q_dup_u8(&params->fp32_neonv8.output_min);
  const uint8x16_t voutput_max = vld1q_dup_u8(&params->fp32_neonv8.output_max);
  // Main loop: 16 channels per iteration, tracked as two 8-lane groups
  // (01234567 and 89ABCDEF). Loads are interleaved with the widening adds.
  for (; channels >= 16; channels -= 16) {
    const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
    const uint8x8_t vi0x89ABCDEF = vld1_u8(i0); i0 += 8;
    const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
    const uint8x8_t vi1x89ABCDEF = vld1_u8(i1); i1 += 8;
    // Sum the 7 rows into 16-bit accumulators; 7 * 255 fits in uint16.
    const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
    uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
    const uint8x8_t vi2x89ABCDEF = vld1_u8(i2); i2 += 8;
    uint16x8_t vsum89ABCDEF = vaddl_u8(vi0x89ABCDEF, vi1x89ABCDEF);
    const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
    const uint8x8_t vi3x89ABCDEF = vld1_u8(i3); i3 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi2x89ABCDEF);
    const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
    const uint8x8_t vi4x89ABCDEF = vld1_u8(i4); i4 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi3x89ABCDEF);
    const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
    const uint8x8_t vi5x89ABCDEF = vld1_u8(i5); i5 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi4x89ABCDEF);
    const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
    const uint8x8_t vi6x89ABCDEF = vld1_u8(i6); i6 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi5x89ABCDEF);
    vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi6x89ABCDEF);
    // Widen the 16-bit sums to 32 bits and add the (signed) init bias; the
    // add is done in the unsigned domain via reinterpret casts.
    int32x4_t vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum01234567)));
    int32x4_t vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum01234567)));
    int32x4_t vacc89AB = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum89ABCDEF)));
    int32x4_t vaccCDEF = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum89ABCDEF)));
    // fp32 requantization: convert to float, scale, and round to nearest
    // with the ARMv8 FCVTNS instruction.
    float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
    float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
    float32x4_t vfpacc89AB = vcvtq_f32_s32(vacc89AB);
    float32x4_t vfpaccCDEF = vcvtq_f32_s32(vaccCDEF);
    vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
    vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
    vfpacc89AB = vmulq_f32(vfpacc89AB, vscale);
    vfpaccCDEF = vmulq_f32(vfpaccCDEF, vscale);
    vacc0123 = vcvtnq_s32_f32(vfpacc0123);
    vacc4567 = vcvtnq_s32_f32(vfpacc4567);
    vacc89AB = vcvtnq_s32_f32(vfpacc89AB);
    vaccCDEF = vcvtnq_s32_f32(vfpaccCDEF);
    // Saturating narrow 32 -> 16 bits (AArch64 has fused high-half narrowing).
    #if XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
      int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
    #else // !XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
      int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
    #endif // !XNN_ARCH_ARM64
    // Add the output zero point with saturation in the 16-bit domain.
    vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point)
;
    vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
    // Saturating narrow 16 -> unsigned 8 bits.
    #if XNN_ARCH_ARM64
      uint8x16_t vout0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc01234567), vacc89ABCDEF);
    #else // !XNN_ARCH_ARM64
      uint8x16_t vout0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc01234567), vqmovun_s16(vacc89ABCDEF));
    #endif // !XNN_ARCH_ARM64
    // Clamp to [output_min, output_max] and store 16 output bytes.
    vout0123456789ABCDEF = vmaxq_u8(vout0123456789ABCDEF, voutput_min);
    vout0123456789ABCDEF = vminq_u8(vout0123456789ABCDEF, voutput_max);
    vst1q_u8(output, vout0123456789ABCDEF); output += 16;
  }
  // Remainder: process the final 1..15 channels in groups of up to 8,
  // with a partial (4/2/1-byte) store for the last sub-8 group.
  if XNN_UNLIKELY(channels != 0) {
    do {
      const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
      const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
      const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
      uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
      const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
      const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
      const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
      const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
      vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
      int32x4_t vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum01234567)));
      int32x4_t vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum01234567)));
      float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
      float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
      vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
      vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
      vacc0123 = vcvtnq_s32_f32(vfpacc0123);
      vacc4567 = vcvtnq_s32_f32(vfpacc4567);
      #if XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
      #else
        int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
      #endif
      vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
      uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
      vout01234567 = vmax_u8(vout01234567, vget_low_u8(voutput_min));
      vout01234567 = vmin_u8(vout01234567, vget_low_u8(voutput_max));
      if XNN_LIKELY(channels >= 8) {
        vst1_u8(output, vout01234567); output += 8;
        channels -= 8;
      } else {
        // Store 4, 2, then 1 byte(s) as needed, shifting consumed lanes out.
        if (channels & 4) {
          vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
          vout01234567 = vext_u8(vout01234567, vout01234567, 4);
        }
        if (channels & 2) {
          vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
          vout01234567 = vext_u8(vout01234567, vout01234567, 2);
        }
        if (channels & 1) {
          vst1_lane_u8(output, vout01234567, 0); output += 1;
        }
        channels = 0;
      }
    } while (channels != 0);
  }
}
| 8,182
| 40.964103
| 124
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7x-minmax-fp32-neonv8-c24.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__neonv8_c24(
size_t rows,
size_t channels,
const uint8_t* input,
size_t input_stride,
const uint8_t* zero,
uint8_t* output,
const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
const uint8_t* i0 = input;
const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const int32x4_t vinit_bias = vld1q_dup_s32(¶ms->fp32_neonv8.init_bias);
const float32x4_t vscale = vld1q_dup_f32(¶ms->fp32_neonv8.scale);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->fp32_neonv8.output_zero_point);
const uint8x16_t voutput_min = vld1q_dup_u8(¶ms->fp32_neonv8.output_min);
const uint8x16_t voutput_max = vld1q_dup_u8(¶ms->fp32_neonv8.output_max);
for (; channels >= 24; channels -= 24) {
const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
const uint8x8_t vi0x89ABCDEF = vld1_u8(i0); i0 += 8;
const uint8x8_t vi0xGHIJKLMN = vld1_u8(i0); i0 += 8;
const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
const uint8x8_t vi1x89ABCDEF = vld1_u8(i1); i1 += 8;
const uint8x8_t vi1xGHIJKLMN = vld1_u8(i1); i1 += 8;
const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
const uint8x8_t vi2x89ABCDEF = vld1_u8(i2); i2 += 8;
uint16x8_t vsum89ABCDEF = vaddl_u8(vi0x89ABCDEF, vi1x89ABCDEF);
const uint8x8_t vi2xGHIJKLMN = vld1_u8(i2); i2 += 8;
uint16x8_t vsumGHIJKLMN = vaddl_u8(vi0xGHIJKLMN, vi1xGHIJKLMN);
const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
const uint8x8_t vi3x89ABCDEF = vld1_u8(i3); i3 += 8;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi2x89ABCDEF);
const uint8x8_t vi3xGHIJKLMN = vld1_u8(i3); i3 += 8;
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi2xGHIJKLMN);
const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
const uint8x8_t vi4x89ABCDEF = vld1_u8(i4); i4 += 8;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi3x89ABCDEF);
const uint8x8_t vi4xGHIJKLMN = vld1_u8(i4); i4 += 8;
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi3xGHIJKLMN);
const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
const uint8x8_t vi5x89ABCDEF = vld1_u8(i5); i5 += 8;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi4x89ABCDEF);
const uint8x8_t vi5xGHIJKLMN = vld1_u8(i5); i5 += 8;
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi4xGHIJKLMN);
const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
const uint8x8_t vi6x89ABCDEF = vld1_u8(i6); i6 += 8;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi5x89ABCDEF);
const uint8x8_t vi6xGHIJKLMN = vld1_u8(i6); i6 += 8;
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi5xGHIJKLMN);
vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi6x89ABCDEF);
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi6xGHIJKLMN);
int32x4_t vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum01234567)));
int32x4_t vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum01234567)));
int32x4_t vacc89AB = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum89ABCDEF)));
int32x4_t vaccCDEF = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum89ABCDEF)));
int32x4_t vaccGHIJ = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsumGHIJKLMN)));
int32x4_t vaccKLMN = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsumGHIJKLMN)));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
float32x4_t vfpacc89AB = vcvtq_f32_s32(vacc89AB);
float32x4_t vfpaccCDEF = vcvtq_f32_s32(vaccCDEF);
float32x4_t vfpaccGHIJ = vcvtq_f32_s32(vaccGHIJ);
float32x4_t vfpaccKLMN = vcvtq_f32_s32(vaccKLMN);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vfpacc89AB = vmulq_f32(vfpacc89AB, vscale);
vfpaccCDEF = vmulq_f32(vfpaccCDEF, vscale);
vfpaccGHIJ = vmulq_f32(vfpaccGHIJ, vscale);
vfpaccKLMN = vmulq_f32(vfpaccKLMN, vscale);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
vacc89AB = vcvtnq_s32_f32(vfpacc89AB);
vaccCDEF = vcvtnq_s32_f32(vfpaccCDEF);
vaccGHIJ = vcvtnq_s32_f32(vfpaccGHIJ);
vaccKLMN = vcvtnq_s32_f32(vfpaccKLMN);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
int16x8_t vaccGHIJKLMN = vqmovn_high_s32(vqmovn_s32(vaccGHIJ), vaccKLMN);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
int16x8_t vaccGHIJKLMN = vcombine_s16(vqmovn_s32(vaccGHIJ), vqmovn_s32(vaccKLMN));
#endif // !XNN_ARCH_ARM64
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
vaccGHIJKLMN = vqaddq_s16(vaccGHIJKLMN, voutput_zero_point);
#if XNN_ARCH_ARM64
uint8x16_t vout0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc01234567), vacc89ABCDEF);
uint8x8_t voutGHIJKLMN = vqmovun_s16(vaccGHIJKLMN);
#else // !XNN_ARCH_ARM64
uint8x16_t vout0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc01234567), vqmovun_s16(vacc89ABCDEF));
uint8x8_t voutGHIJKLMN = vqmovun_s16(vaccGHIJKLMN);
#endif // !XNN_ARCH_ARM64
vout0123456789ABCDEF = vmaxq_u8(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMN = vmax_u8(voutGHIJKLMN, vget_low_u8(voutput_min));
vout0123456789ABCDEF = vminq_u8(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMN = vmin_u8(voutGHIJKLMN, vget_low_u8(voutput_max));
vst1q_u8(output, vout0123456789ABCDEF); output += 16;
vst1_u8(output, voutGHIJKLMN); output += 8;
}
if XNN_UNLIKELY(channels != 0) {
do {
const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
int32x4_t vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum01234567)));
int32x4_t vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum01234567)));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
vout01234567 = vmax_u8(vout01234567, vget_low_u8(voutput_min));
vout01234567 = vmin_u8(vout01234567, vget_low_u8(voutput_max));
if XNN_LIKELY(channels >= 8) {
vst1_u8(output, vout01234567); output += 8;
channels -= 8;
} else {
if (channels & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
vout01234567 = vext_u8(vout01234567, vout01234567, 4);
}
if (channels & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
vout01234567 = vext_u8(vout01234567, vout01234567, 2);
}
if (channels & 1) {
vst1_lane_u8(output, vout01234567, 0); output += 1;
}
channels = 0;
}
} while (channels != 0);
}
}
| 10,003
| 43.660714
| 124
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7x-minmax-fp32-neonv8-c32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/intrinsics-polyfill.h>
// Single-pass ("unipass", rows <= 7) QU8 global-average-pooling microkernel with
// fp32 requantization, ARMv8 NEON variant processing 32 channels per main-loop
// iteration. Sums up to 7 input rows per channel, applies the precomputed
// init_bias and scale from `params`, and writes clamped uint8 outputs.
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__neonv8_c32(
    size_t rows,
    size_t channels,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    uint8_t* output,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows != 0);
  assert(rows <= 7);
  assert(channels != 0);
  // Set up 7 row pointers; rows beyond the valid count are redirected to the
  // `zero` buffer so the accumulation below needs no per-row branching.
  const uint8_t* i0 = input;
  const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
  if XNN_UNPREDICTABLE(rows < 2) {
    i1 = zero;
  }
  const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 2) {
    i2 = zero;
  }
  const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
  if XNN_UNPREDICTABLE(rows < 4) {
    i3 = zero;
  }
  const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 4) {
    i4 = zero;
  }
  const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
  if XNN_UNPREDICTABLE(rows < 6) {
    i5 = zero;
  }
  const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 6) {
    i6 = zero;
  }
  // Requantization constants, precomputed by the caller into `params`.
  const int32x4_t vinit_bias = vld1q_dup_s32(&params->fp32_neonv8.init_bias);
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
  const uint8x16_t voutput_min = vld1q_dup_u8(&params->fp32_neonv8.output_min);
  const uint8x16_t voutput_max = vld1q_dup_u8(&params->fp32_neonv8.output_max);
  // Main loop: 32 channels per iteration, as four groups of 8 named by hex
  // digit ranges (01234567, 89ABCDEF, GHIJKLMN, OPQRSTUV). Loads and adds are
  // interleaved to aid instruction scheduling.
  for (; channels >= 32; channels -= 32) {
    const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
    const uint8x8_t vi0x89ABCDEF = vld1_u8(i0); i0 += 8;
    const uint8x8_t vi0xGHIJKLMN = vld1_u8(i0); i0 += 8;
    const uint8x8_t vi0xOPQRSTUV = vld1_u8(i0); i0 += 8;
    const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
    const uint8x8_t vi1x89ABCDEF = vld1_u8(i1); i1 += 8;
    const uint8x8_t vi1xGHIJKLMN = vld1_u8(i1); i1 += 8;
    const uint8x8_t vi1xOPQRSTUV = vld1_u8(i1); i1 += 8;
    const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
    uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
    const uint8x8_t vi2x89ABCDEF = vld1_u8(i2); i2 += 8;
    uint16x8_t vsum89ABCDEF = vaddl_u8(vi0x89ABCDEF, vi1x89ABCDEF);
    const uint8x8_t vi2xGHIJKLMN = vld1_u8(i2); i2 += 8;
    uint16x8_t vsumGHIJKLMN = vaddl_u8(vi0xGHIJKLMN, vi1xGHIJKLMN);
    const uint8x8_t vi2xOPQRSTUV = vld1_u8(i2); i2 += 8;
    uint16x8_t vsumOPQRSTUV = vaddl_u8(vi0xOPQRSTUV, vi1xOPQRSTUV);
    const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
    const uint8x8_t vi3x89ABCDEF = vld1_u8(i3); i3 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi2x89ABCDEF);
    const uint8x8_t vi3xGHIJKLMN = vld1_u8(i3); i3 += 8;
    vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi2xGHIJKLMN);
    const uint8x8_t vi3xOPQRSTUV = vld1_u8(i3); i3 += 8;
    vsumOPQRSTUV = vaddw_u8(vsumOPQRSTUV, vi2xOPQRSTUV);
    const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
    const uint8x8_t vi4x89ABCDEF = vld1_u8(i4); i4 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi3x89ABCDEF);
    const uint8x8_t vi4xGHIJKLMN = vld1_u8(i4); i4 += 8;
    vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi3xGHIJKLMN);
    const uint8x8_t vi4xOPQRSTUV = vld1_u8(i4); i4 += 8;
    vsumOPQRSTUV = vaddw_u8(vsumOPQRSTUV, vi3xOPQRSTUV);
    const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
    const uint8x8_t vi5x89ABCDEF = vld1_u8(i5); i5 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi4x89ABCDEF);
    const uint8x8_t vi5xGHIJKLMN = vld1_u8(i5); i5 += 8;
    vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi4xGHIJKLMN);
    const uint8x8_t vi5xOPQRSTUV = vld1_u8(i5); i5 += 8;
    vsumOPQRSTUV = vaddw_u8(vsumOPQRSTUV, vi4xOPQRSTUV);
    const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
    const uint8x8_t vi6x89ABCDEF = vld1_u8(i6); i6 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi5x89ABCDEF);
    const uint8x8_t vi6xGHIJKLMN = vld1_u8(i6); i6 += 8;
    vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi5xGHIJKLMN);
    const uint8x8_t vi6xOPQRSTUV = vld1_u8(i6); i6 += 8;
    vsumOPQRSTUV = vaddw_u8(vsumOPQRSTUV, vi5xOPQRSTUV);
    vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi6x89ABCDEF);
    vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi6xGHIJKLMN);
    vsumOPQRSTUV = vaddw_u8(vsumOPQRSTUV, vi6xOPQRSTUV);
    // Widen the uint16 row sums to 32 bits and add init_bias. The sums are
    // reinterpreted rather than converted: 7*255 fits comfortably in 16 bits,
    // so the widened values are non-negative and the s32 view is exact.
    int32x4_t vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum01234567)));
    int32x4_t vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum01234567)));
    int32x4_t vacc89AB = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum89ABCDEF)));
    int32x4_t vaccCDEF = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum89ABCDEF)));
    int32x4_t vaccGHIJ = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsumGHIJKLMN)));
    int32x4_t vaccKLMN = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsumGHIJKLMN)));
    int32x4_t vaccOPQR = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsumOPQRSTUV)));
    int32x4_t vaccSTUV = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsumOPQRSTUV)));
    // fp32 requantization: convert to float, scale, and round back to int32
    // with round-to-nearest-even (vcvtnq, ARMv8-only instruction).
    float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
    float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
    float32x4_t vfpacc89AB = vcvtq_f32_s32(vacc89AB);
    float32x4_t vfpaccCDEF = vcvtq_f32_s32(vaccCDEF);
    float32x4_t vfpaccGHIJ = vcvtq_f32_s32(vaccGHIJ);
    float32x4_t vfpaccKLMN = vcvtq_f32_s32(vaccKLMN);
    float32x4_t vfpaccOPQR = vcvtq_f32_s32(vaccOPQR);
    float32x4_t vfpaccSTUV = vcvtq_f32_s32(vaccSTUV);
    vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
    vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
    vfpacc89AB = vmulq_f32(vfpacc89AB, vscale);
    vfpaccCDEF = vmulq_f32(vfpaccCDEF, vscale);
    vfpaccGHIJ = vmulq_f32(vfpaccGHIJ, vscale);
    vfpaccKLMN = vmulq_f32(vfpaccKLMN, vscale);
    vfpaccOPQR = vmulq_f32(vfpaccOPQR, vscale);
    vfpaccSTUV = vmulq_f32(vfpaccSTUV, vscale);
    vacc0123 = vcvtnq_s32_f32(vfpacc0123);
    vacc4567 = vcvtnq_s32_f32(vfpacc4567);
    vacc89AB = vcvtnq_s32_f32(vfpacc89AB);
    vaccCDEF = vcvtnq_s32_f32(vfpaccCDEF);
    vaccGHIJ = vcvtnq_s32_f32(vfpaccGHIJ);
    vaccKLMN = vcvtnq_s32_f32(vfpaccKLMN);
    vaccOPQR = vcvtnq_s32_f32(vfpaccOPQR);
    vaccSTUV = vcvtnq_s32_f32(vfpaccSTUV);
    // Saturating narrow to int16, add output zero point (saturating), then
    // saturating narrow to uint8. AArch64 uses the *_high fused forms.
    #if XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
      int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
      int16x8_t vaccGHIJKLMN = vqmovn_high_s32(vqmovn_s32(vaccGHIJ), vaccKLMN);
      int16x8_t vaccOPQRSTUV = vqmovn_high_s32(vqmovn_s32(vaccOPQR), vaccSTUV);
    #else // !XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
      int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
      int16x8_t vaccGHIJKLMN = vcombine_s16(vqmovn_s32(vaccGHIJ), vqmovn_s32(vaccKLMN));
      int16x8_t vaccOPQRSTUV = vcombine_s16(vqmovn_s32(vaccOPQR), vqmovn_s32(vaccSTUV));
    #endif // !XNN_ARCH_ARM64
    vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
    vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
    vaccGHIJKLMN = vqaddq_s16(vaccGHIJKLMN, voutput_zero_point);
    vaccOPQRSTUV = vqaddq_s16(vaccOPQRSTUV, voutput_zero_point);
    #if XNN_ARCH_ARM64
      uint8x16_t vout0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc01234567), vacc89ABCDEF);
      uint8x16_t voutGHIJKLMNOPQRSTUV = vqmovun_high_s16(vqmovun_s16(vaccGHIJKLMN), vaccOPQRSTUV);
    #else // !XNN_ARCH_ARM64
      uint8x16_t vout0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc01234567), vqmovun_s16(vacc89ABCDEF));
      uint8x16_t voutGHIJKLMNOPQRSTUV = vcombine_u8(vqmovun_s16(vaccGHIJKLMN), vqmovun_s16(vaccOPQRSTUV));
    #endif // !XNN_ARCH_ARM64
    // Clamp to [output_min, output_max] and store 32 output bytes.
    vout0123456789ABCDEF = vmaxq_u8(vout0123456789ABCDEF, voutput_min);
    voutGHIJKLMNOPQRSTUV = vmaxq_u8(voutGHIJKLMNOPQRSTUV, voutput_min);
    vout0123456789ABCDEF = vminq_u8(vout0123456789ABCDEF, voutput_max);
    voutGHIJKLMNOPQRSTUV = vminq_u8(voutGHIJKLMNOPQRSTUV, voutput_max);
    vst1q_u8(output, vout0123456789ABCDEF); output += 16;
    vst1q_u8(output, voutGHIJKLMNOPQRSTUV); output += 16;
  }
  // Tail: handle the remaining 1-31 channels 8 at a time; a final partial
  // group of 1-7 channels is stored lane-wise. The XNN_OOB_READS annotation
  // on the function indicates the 8-byte loads may read past the last channel.
  if XNN_UNLIKELY(channels != 0) {
    do {
      const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
      const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
      const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
      uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
      const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
      const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
      const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
      const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
      vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
      int32x4_t vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum01234567)));
      int32x4_t vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum01234567)));
      float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
      float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
      vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
      vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
      vacc0123 = vcvtnq_s32_f32(vfpacc0123);
      vacc4567 = vcvtnq_s32_f32(vfpacc4567);
      #if XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
      #else
        int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
      #endif
      vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
      uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
      vout01234567 = vmax_u8(vout01234567, vget_low_u8(voutput_min));
      vout01234567 = vmin_u8(vout01234567, vget_low_u8(voutput_max));
      if XNN_LIKELY(channels >= 8) {
        vst1_u8(output, vout01234567); output += 8;
        channels -= 8;
      } else {
        // Partial store: emit 4, 2, then 1 bytes as needed, rotating the
        // vector with vext so the next lanes land at index 0.
        if (channels & 4) {
          vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
          vout01234567 = vext_u8(vout01234567, vout01234567, 4);
        }
        if (channels & 2) {
          vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
          vout01234567 = vext_u8(vout01234567, vout01234567, 2);
        }
        if (channels & 1) {
          vst1_lane_u8(output, vout01234567, 0); output += 1;
        }
        channels = 0;
      }
    } while (channels != 0);
  }
}
| 11,632
| 45.907258
| 124
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7x-minmax-fp32-neonv8-c8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/intrinsics-polyfill.h>
// Single-pass ("unipass", rows <= 7) QU8 global-average-pooling microkernel with
// fp32 requantization, ARMv8 NEON variant processing 8 channels per iteration.
// Fix vs. generated original: the #if XNN_ARCH_ARM64 / #else around the final
// vqmovun_s16 narrowing had byte-identical branches (a template artifact), so
// the redundant conditional is collapsed to a single statement.
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__neonv8_c8(
    size_t rows,
    size_t channels,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    uint8_t* output,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows != 0);
  assert(rows <= 7);
  assert(channels != 0);
  // Set up 7 row pointers; rows beyond the valid count read from `zero`,
  // so the accumulation below needs no per-row branching.
  const uint8_t* i0 = input;
  const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
  if XNN_UNPREDICTABLE(rows < 2) {
    i1 = zero;
  }
  const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 2) {
    i2 = zero;
  }
  const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
  if XNN_UNPREDICTABLE(rows < 4) {
    i3 = zero;
  }
  const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 4) {
    i4 = zero;
  }
  const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
  if XNN_UNPREDICTABLE(rows < 6) {
    i5 = zero;
  }
  const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 6) {
    i6 = zero;
  }
  // Requantization constants, precomputed by the caller into `params`.
  const int32x4_t vinit_bias = vld1q_dup_s32(&params->fp32_neonv8.init_bias);
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
  const uint8x8_t voutput_min = vld1_dup_u8(&params->fp32_neonv8.output_min);
  const uint8x8_t voutput_max = vld1_dup_u8(&params->fp32_neonv8.output_max);
  for (; channels >= 8; channels -= 8) {
    // Sum 7 rows of 8 uint8 channels into a uint16 accumulator (max 7*255
    // fits in 16 bits without overflow).
    const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
    const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
    const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
    uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
    const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
    const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
    const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
    const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
    vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
    // Widen to int32 and add init_bias (sums are non-negative, so the s32
    // reinterpretation of the u32 addition is exact).
    int32x4_t vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum01234567)));
    int32x4_t vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum01234567)));
    // fp32 requantization: scale in float, round-to-nearest-even (vcvtnq).
    float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
    float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
    vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
    vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
    vacc0123 = vcvtnq_s32_f32(vfpacc0123);
    vacc4567 = vcvtnq_s32_f32(vfpacc4567);
    #if XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
    #else // !XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
    #endif // !XNN_ARCH_ARM64
    vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
    // Saturating narrow to uint8 (identical instruction on both ARM and
    // AArch64, so no #if is needed here).
    uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
    vout01234567 = vmax_u8(vout01234567, voutput_min);
    vout01234567 = vmin_u8(vout01234567, voutput_max);
    vst1_u8(output, vout01234567); output += 8;
  }
  // Tail: 1-7 remaining channels; loads may read past the end (XNN_OOB_READS),
  // stores are exact via lane-wise writes.
  if XNN_UNLIKELY(channels != 0) {
    {
      const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
      const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
      const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
      uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
      const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
      const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
      const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
      const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
      vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
      int32x4_t vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum01234567)));
      int32x4_t vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum01234567)));
      float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
      float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
      vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
      vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
      vacc0123 = vcvtnq_s32_f32(vfpacc0123);
      vacc4567 = vcvtnq_s32_f32(vfpacc4567);
      #if XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
      #else
        int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
      #endif
      vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
      uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
      vout01234567 = vmax_u8(vout01234567, voutput_min);
      vout01234567 = vmin_u8(vout01234567, voutput_max);
      // Emit 4, 2, then 1 bytes as needed, rotating lanes down with vext.
      if (channels & 4) {
        vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
        vout01234567 = vext_u8(vout01234567, vout01234567, 4);
      }
      if (channels & 2) {
        vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
        vout01234567 = vext_u8(vout01234567, vout01234567, 2);
      }
      if (channels & 1) {
        vst1_lane_u8(output, vout01234567, 0);
      }
    }
  }
}
| 6,276
| 37.042424
| 124
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7x-minmax-fp32-scalar-fmagic-c1.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
// Single-pass (rows <= 7) QU8 global-average-pooling microkernel, scalar
// variant with fp32 "fmagic" requantization, one channel per iteration.
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__scalar_fmagic_c1(
    size_t rows,
    size_t channels,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    uint8_t* output,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(rows != 0);
  assert(rows <= 7);
  assert(channels != 0);
  // Seven row pointers; any row past the valid count reads from `zero`, so
  // the per-channel accumulation can always sum exactly seven values.
  const uint8_t* row0 = input;
  const uint8_t* row1 = (const uint8_t*) ((uintptr_t) row0 + input_stride);
  if XNN_UNPREDICTABLE(rows < 2) {
    row1 = zero;
  }
  const uint8_t* row2 = (const uint8_t*) ((uintptr_t) row1 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 2) {
    row2 = zero;
  }
  const uint8_t* row3 = (const uint8_t*) ((uintptr_t) row2 + input_stride);
  if XNN_UNPREDICTABLE(rows < 4) {
    row3 = zero;
  }
  const uint8_t* row4 = (const uint8_t*) ((uintptr_t) row3 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 4) {
    row4 = zero;
  }
  const uint8_t* row5 = (const uint8_t*) ((uintptr_t) row4 + input_stride);
  if XNN_UNPREDICTABLE(rows < 6) {
    row5 = zero;
  }
  const uint8_t* row6 = (const uint8_t*) ((uintptr_t) row5 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 6) {
    row6 = zero;
  }
  // Requantization constants precomputed by the caller into `params`.
  const int32_t vinit_bias = params->fp32_scalar_fmagic.init_bias;
  const float vscale = params->fp32_scalar_fmagic.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
  const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
  const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
  do {
    // Sum the seven row values for this channel on top of the init bias.
    int32_t acc = vinit_bias;
    acc += (int32_t) *row0++;
    acc += (int32_t) *row1++;
    acc += (int32_t) *row2++;
    acc += (int32_t) *row3++;
    acc += (int32_t) *row4++;
    acc += (int32_t) *row5++;
    acc += (int32_t) *row6++;
    // fp32 requantization: scale, clamp in the zero-point-relative domain,
    // then use the "magic bias" float->int trick to round and rebias.
    float facc = (float) acc * vscale;
    facc = math_max_f32(facc, voutput_min_less_zero_point);
    facc = math_min_f32(facc, voutput_max_less_zero_point);
    facc += vmagic_bias;
    const int32_t out = (int32_t) float_as_uint32(facc) - vmagic_bias_less_output_zero_point;
    *output++ = (uint8_t) out;
  } while (--channels != 0);
}
| 2,809
| 30.573034
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7x-minmax-fp32-scalar-fmagic-c2.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
// Single-pass (rows <= 7) QU8 global-average-pooling microkernel, scalar
// variant with fp32 "fmagic" requantization, two channels per iteration.
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__scalar_fmagic_c2(
    size_t rows,
    size_t channels,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    uint8_t* output,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(rows != 0);
  assert(rows <= 7);
  assert(channels != 0);
  // Seven row pointers; any row past the valid count reads from `zero`, so
  // the accumulation below can always sum exactly seven values per channel.
  const uint8_t* row0 = input;
  const uint8_t* row1 = (const uint8_t*) ((uintptr_t) row0 + input_stride);
  if XNN_UNPREDICTABLE(rows < 2) {
    row1 = zero;
  }
  const uint8_t* row2 = (const uint8_t*) ((uintptr_t) row1 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 2) {
    row2 = zero;
  }
  const uint8_t* row3 = (const uint8_t*) ((uintptr_t) row2 + input_stride);
  if XNN_UNPREDICTABLE(rows < 4) {
    row3 = zero;
  }
  const uint8_t* row4 = (const uint8_t*) ((uintptr_t) row3 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 4) {
    row4 = zero;
  }
  const uint8_t* row5 = (const uint8_t*) ((uintptr_t) row4 + input_stride);
  if XNN_UNPREDICTABLE(rows < 6) {
    row5 = zero;
  }
  const uint8_t* row6 = (const uint8_t*) ((uintptr_t) row5 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 6) {
    row6 = zero;
  }
  // Requantization constants precomputed by the caller into `params`.
  const int32_t vinit_bias = params->fp32_scalar_fmagic.init_bias;
  const float vscale = params->fp32_scalar_fmagic.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
  const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
  const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
  // Main loop: two adjacent channels per iteration, accumulated row by row.
  for (; channels >= 2; channels -= 2) {
    int32_t acc0 = vinit_bias;
    int32_t acc1 = vinit_bias;
    acc0 += (int32_t) row0[0];
    acc1 += (int32_t) row0[1];
    row0 += 2;
    acc0 += (int32_t) row1[0];
    acc1 += (int32_t) row1[1];
    row1 += 2;
    acc0 += (int32_t) row2[0];
    acc1 += (int32_t) row2[1];
    row2 += 2;
    acc0 += (int32_t) row3[0];
    acc1 += (int32_t) row3[1];
    row3 += 2;
    acc0 += (int32_t) row4[0];
    acc1 += (int32_t) row4[1];
    row4 += 2;
    acc0 += (int32_t) row5[0];
    acc1 += (int32_t) row5[1];
    row5 += 2;
    acc0 += (int32_t) row6[0];
    acc1 += (int32_t) row6[1];
    row6 += 2;
    // fp32 requantization: scale, clamp in the zero-point-relative domain,
    // then the "magic bias" float->int trick to round and rebias.
    float facc0 = (float) acc0 * vscale;
    float facc1 = (float) acc1 * vscale;
    facc0 = math_max_f32(facc0, voutput_min_less_zero_point);
    facc1 = math_max_f32(facc1, voutput_min_less_zero_point);
    facc0 = math_min_f32(facc0, voutput_max_less_zero_point);
    facc1 = math_min_f32(facc1, voutput_max_less_zero_point);
    facc0 += vmagic_bias;
    facc1 += vmagic_bias;
    const int32_t out0 = (int32_t) float_as_uint32(facc0) - vmagic_bias_less_output_zero_point;
    const int32_t out1 = (int32_t) float_as_uint32(facc1) - vmagic_bias_less_output_zero_point;
    output[0] = (uint8_t) out0;
    output[1] = (uint8_t) out1;
    output += 2;
  }
  // Tail: at most one channel remains; pointers need no further advancing.
  if XNN_UNLIKELY(channels != 0) {
    int32_t acc = vinit_bias;
    acc += (int32_t) *row0;
    acc += (int32_t) *row1;
    acc += (int32_t) *row2;
    acc += (int32_t) *row3;
    acc += (int32_t) *row4;
    acc += (int32_t) *row5;
    acc += (int32_t) *row6;
    float facc = (float) acc * vscale;
    facc = math_max_f32(facc, voutput_min_less_zero_point);
    facc = math_min_f32(facc, voutput_max_less_zero_point);
    facc += vmagic_bias;
    const int32_t out = (int32_t) float_as_uint32(facc) - vmagic_bias_less_output_zero_point;
    *output = (uint8_t) out;
  }
}
| 4,540
| 29.682432
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7x-minmax-fp32-scalar-fmagic-c4.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__scalar_fmagic_c4(
size_t rows,
size_t channels,
const uint8_t* input,
size_t input_stride,
const uint8_t* zero,
uint8_t* output,
const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
const uint8_t* i0 = input;
const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const int32_t vinit_bias = params->fp32_scalar_fmagic.init_bias;
const float vscale = params->fp32_scalar_fmagic.scale;
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
for (; channels >= 4; channels -= 4) {
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
int32_t vacc0 = vi0x0 + vinit_bias;
const int32_t vi1x0 = (int32_t) i1[0];
int32_t vacc1 = vi0x1 + vinit_bias;
const int32_t vi1x1 = (int32_t) i1[1];
int32_t vacc2 = vi0x2 + vinit_bias;
const int32_t vi1x2 = (int32_t) i1[2];
int32_t vacc3 = vi0x3 + vinit_bias;
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
vacc0 += vi1x0;
const int32_t vi2x0 = (int32_t) i2[0];
vacc1 += vi1x1;
const int32_t vi2x1 = (int32_t) i2[1];
vacc2 += vi1x2;
const int32_t vi2x2 = (int32_t) i2[2];
vacc3 += vi1x3;
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
vacc0 += vi2x0;
const int32_t vi3x0 = (int32_t) i3[0];
vacc1 += vi2x1;
const int32_t vi3x1 = (int32_t) i3[1];
vacc2 += vi2x2;
const int32_t vi3x2 = (int32_t) i3[2];
vacc3 += vi2x3;
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
vacc0 += vi3x0;
const int32_t vi4x0 = (int32_t) i4[0];
vacc1 += vi3x1;
const int32_t vi4x1 = (int32_t) i4[1];
vacc2 += vi3x2;
const int32_t vi4x2 = (int32_t) i4[2];
vacc3 += vi3x3;
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
vacc0 += vi4x0;
const int32_t vi5x0 = (int32_t) i5[0];
vacc1 += vi4x1;
const int32_t vi5x1 = (int32_t) i5[1];
vacc2 += vi4x2;
const int32_t vi5x2 = (int32_t) i5[2];
vacc3 += vi4x3;
const int32_t vi5x3 = (int32_t) i5[3];
i5 += 4;
vacc0 += vi5x0;
const int32_t vi6x0 = (int32_t) i6[0];
vacc1 += vi5x1;
const int32_t vi6x1 = (int32_t) i6[1];
vacc2 += vi5x2;
const int32_t vi6x2 = (int32_t) i6[2];
vacc3 += vi5x3;
const int32_t vi6x3 = (int32_t) i6[3];
i6 += 4;
vacc0 += vi6x0;
vacc1 += vi6x1;
vacc2 += vi6x2;
vacc3 += vi6x3;
float vfpacc0 = (float) vacc0 * vscale;
float vfpacc1 = (float) vacc1 * vscale;
float vfpacc2 = (float) vacc2 * vscale;
float vfpacc3 = (float) vacc3 * vscale;
vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc2 = math_max_f32(vfpacc2, voutput_min_less_zero_point);
vfpacc3 = math_max_f32(vfpacc3, voutput_min_less_zero_point);
vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
vfpacc2 = math_min_f32(vfpacc2, voutput_max_less_zero_point);
vfpacc3 = math_min_f32(vfpacc3, voutput_max_less_zero_point);
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
vfpacc2 += vmagic_bias;
vfpacc3 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
int32_t vout2 = (int32_t) float_as_uint32(vfpacc2) - vmagic_bias_less_output_zero_point;
int32_t vout3 = (int32_t) float_as_uint32(vfpacc3) - vmagic_bias_less_output_zero_point;
output[0] = (uint8_t) vout0;
output[1] = (uint8_t) vout1;
output[2] = (uint8_t) vout2;
output[3] = (uint8_t) vout3;
output += 4;
}
if XNN_UNLIKELY(channels != 0) {
do {
int32_t vacc = vinit_bias;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vi1 = (int32_t) *i1++;
vacc += vi0;
const int32_t vi2 = (int32_t) *i2++;
vacc += vi1;
const int32_t vi3 = (int32_t) *i3++;
vacc += vi2;
const int32_t vi4 = (int32_t) *i4++;
vacc += vi3;
const int32_t vi5 = (int32_t) *i5++;
vacc += vi4;
const int32_t vi6 = (int32_t) *i6++;
vacc += vi5;
vacc += vi6;
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (uint8_t) vout;
} while (--channels != 0);
}
}
| 6,220
| 31.742105
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7x-minmax-fp32-scalar-imagic-c1.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__scalar_imagic_c1(
    size_t rows,
    size_t channels,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    uint8_t* output,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(rows != 0);
  assert(rows <= 7);
  assert(channels != 0);

  // Global average pooling over up to 7 rows, one channel per iteration.
  // Row pointer k is redirected to the zero buffer when rows <= k, which is
  // exactly the generated chain of rows<2, rows<=2, rows<4, ... checks.
  const uint8_t* in[7];
  in[0] = input;
  for (size_t k = 1; k < 7; k++) {
    in[k] = (const uint8_t*) ((uintptr_t) in[k - 1] + input_stride);
    if (rows <= k) {
      in[k] = zero;
    }
  }

  const int32_t vinit_bias = params->fp32_scalar_imagic.init_bias;
  const float vscale = params->fp32_scalar_imagic.scale;
  const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
  const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
  const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
  const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;

  do {
    // Sum the 7 row inputs for this channel on top of the init bias.
    int32_t vacc = vinit_bias;
    for (size_t k = 0; k < 7; k++) {
      vacc += (int32_t) *in[k]++;
    }

    // Scale, then use the magic-bias trick to round-and-reinterpret as an
    // integer, clamping in the integer domain before removing the bias.
    float vfpacc = (float) vacc * vscale;
    vfpacc += vmagic_bias;
    int32_t vout = (int32_t) float_as_uint32(vfpacc);
    vout = math_max_s32(vout, vmagic_min);
    vout = math_min_s32(vout, vmagic_max);
    vout -= vmagic_bias_less_zero_point;
    *output++ = (uint8_t) vout;
  } while (--channels != 0);
}
| 2,693
| 28.933333
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7x-minmax-fp32-scalar-imagic-c2.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__scalar_imagic_c2(
    size_t rows,
    size_t channels,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    uint8_t* output,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(rows != 0);
  assert(rows <= 7);
  assert(channels != 0);

  // Global average pooling over up to 7 rows, two channels at a time.
  // Row pointer k is redirected to the zero buffer when rows <= k, which is
  // exactly the generated chain of rows<2, rows<=2, rows<4, ... checks.
  const uint8_t* in[7];
  in[0] = input;
  for (size_t k = 1; k < 7; k++) {
    in[k] = (const uint8_t*) ((uintptr_t) in[k - 1] + input_stride);
    if (rows <= k) {
      in[k] = zero;
    }
  }

  const int32_t vinit_bias = params->fp32_scalar_imagic.init_bias;
  const float vscale = params->fp32_scalar_imagic.scale;
  const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
  const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
  const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
  const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;

  // Main loop: two channels per iteration.
  for (; channels >= 2; channels -= 2) {
    int32_t vacc[2] = { vinit_bias, vinit_bias };
    for (size_t k = 0; k < 7; k++) {
      vacc[0] += (int32_t) in[k][0];
      vacc[1] += (int32_t) in[k][1];
      in[k] += 2;
    }
    for (size_t c = 0; c < 2; c++) {
      // Scale, magic-bias round-to-int, integer clamp, bias removal.
      float vfpacc = (float) vacc[c] * vscale;
      vfpacc += vmagic_bias;
      int32_t vout = (int32_t) float_as_uint32(vfpacc);
      vout = math_max_s32(vout, vmagic_min);
      vout = math_min_s32(vout, vmagic_max);
      vout -= vmagic_bias_less_zero_point;
      output[c] = (uint8_t) vout;
    }
    output += 2;
  }

  // Remainder: at most one channel left; pointers need not advance.
  if XNN_UNLIKELY(channels != 0) {
    int32_t vacc = vinit_bias;
    for (size_t k = 0; k < 7; k++) {
      vacc += (int32_t) *in[k];
    }
    float vfpacc = (float) vacc * vscale;
    vfpacc += vmagic_bias;
    int32_t vout = (int32_t) float_as_uint32(vfpacc);
    vout = math_max_s32(vout, vmagic_min);
    vout = math_min_s32(vout, vmagic_max);
    vout -= vmagic_bias_less_zero_point;
    *output = (uint8_t) vout;
  }
}
| 4,351
| 27.631579
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7x-minmax-fp32-scalar-imagic-c4.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__scalar_imagic_c4(
    size_t rows,
    size_t channels,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    uint8_t* output,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(rows != 0);
  assert(rows <= 7);
  assert(channels != 0);

  // Global average pooling over up to 7 rows, four channels at a time.
  // Row pointer k is redirected to the zero buffer when rows <= k, which is
  // exactly the generated chain of rows<2, rows<=2, rows<4, ... checks.
  const uint8_t* in[7];
  in[0] = input;
  for (size_t k = 1; k < 7; k++) {
    in[k] = (const uint8_t*) ((uintptr_t) in[k - 1] + input_stride);
    if (rows <= k) {
      in[k] = zero;
    }
  }

  const int32_t vinit_bias = params->fp32_scalar_imagic.init_bias;
  const float vscale = params->fp32_scalar_imagic.scale;
  const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
  const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
  const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
  const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;

  // Main loop: four channels per iteration.
  for (; channels >= 4; channels -= 4) {
    int32_t vacc[4] = { vinit_bias, vinit_bias, vinit_bias, vinit_bias };
    for (size_t k = 0; k < 7; k++) {
      for (size_t c = 0; c < 4; c++) {
        vacc[c] += (int32_t) in[k][c];
      }
      in[k] += 4;
    }
    for (size_t c = 0; c < 4; c++) {
      // Scale, magic-bias round-to-int, integer clamp, bias removal.
      float vfpacc = (float) vacc[c] * vscale;
      vfpacc += vmagic_bias;
      int32_t vout = (int32_t) float_as_uint32(vfpacc);
      vout = math_max_s32(vout, vmagic_min);
      vout = math_min_s32(vout, vmagic_max);
      vout -= vmagic_bias_less_zero_point;
      output[c] = (uint8_t) vout;
    }
    output += 4;
  }

  // Remainder: 1-3 channels processed one at a time.
  if XNN_UNLIKELY(channels != 0) {
    do {
      int32_t vacc = vinit_bias;
      for (size_t k = 0; k < 7; k++) {
        vacc += (int32_t) *in[k]++;
      }
      float vfpacc = (float) vacc * vscale;
      vfpacc += vmagic_bias;
      int32_t vout = (int32_t) float_as_uint32(vfpacc);
      vout = math_max_s32(vout, vmagic_min);
      vout = math_min_s32(vout, vmagic_max);
      vout -= vmagic_bias_less_zero_point;
      *output++ = (uint8_t) vout;
    } while (--channels != 0);
  }
}
| 5,959
| 29.408163
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7x-minmax-fp32-scalar-lrintf-c1.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__scalar_lrintf_c1(
    size_t rows,
    size_t channels,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    uint8_t* output,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(rows != 0);
  assert(rows <= 7);
  assert(channels != 0);

  // Global average pooling over up to 7 rows, one channel per iteration.
  // Row pointer k is redirected to the zero buffer when rows <= k, which is
  // exactly the generated chain of rows<2, rows<=2, rows<4, ... checks.
  const uint8_t* in[7];
  in[0] = input;
  for (size_t k = 1; k < 7; k++) {
    in[k] = (const uint8_t*) ((uintptr_t) in[k - 1] + input_stride);
    if (rows <= k) {
      in[k] = zero;
    }
  }

  const int32_t vinit_bias = params->fp32_scalar_lrintf.init_bias;
  const float vscale = params->fp32_scalar_lrintf.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
  const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;

  do {
    // Sum the 7 row inputs for this channel on top of the init bias.
    int32_t vacc = vinit_bias;
    for (size_t k = 0; k < 7; k++) {
      vacc += (int32_t) *in[k]++;
    }

    // Scale, clamp in float, round with lrintf, then re-add the zero point.
    float vfpacc = (float) vacc * vscale;
    vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
    vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
    const int32_t vrndacc = (int32_t) lrintf(vfpacc);
    *output++ = (uint8_t) (vrndacc + voutput_zero_point);
  } while (--channels != 0);
}
| 2,713
| 29.494382
| 98
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7x-minmax-fp32-scalar-lrintf-c2.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__scalar_lrintf_c2(
    size_t rows,
    size_t channels,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    uint8_t* output,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(rows != 0);
  assert(rows <= 7);
  assert(channels != 0);

  // Global average pooling over up to 7 rows, two channels at a time.
  // Row pointer k is redirected to the zero buffer when rows <= k, which is
  // exactly the generated chain of rows<2, rows<=2, rows<4, ... checks.
  const uint8_t* in[7];
  in[0] = input;
  for (size_t k = 1; k < 7; k++) {
    in[k] = (const uint8_t*) ((uintptr_t) in[k - 1] + input_stride);
    if (rows <= k) {
      in[k] = zero;
    }
  }

  const int32_t vinit_bias = params->fp32_scalar_lrintf.init_bias;
  const float vscale = params->fp32_scalar_lrintf.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
  const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;

  // Main loop: two channels per iteration.
  for (; channels >= 2; channels -= 2) {
    int32_t vacc[2] = { vinit_bias, vinit_bias };
    for (size_t k = 0; k < 7; k++) {
      vacc[0] += (int32_t) in[k][0];
      vacc[1] += (int32_t) in[k][1];
      in[k] += 2;
    }
    for (size_t c = 0; c < 2; c++) {
      // Scale, clamp in float, round with lrintf, re-add the zero point.
      float vfpacc = (float) vacc[c] * vscale;
      vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
      vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
      const int32_t vrndacc = (int32_t) lrintf(vfpacc);
      output[c] = (uint8_t) (vrndacc + voutput_zero_point);
    }
    output += 2;
  }

  // Remainder: at most one channel left; pointers need not advance.
  if XNN_UNLIKELY(channels != 0) {
    int32_t vacc = vinit_bias;
    for (size_t k = 0; k < 7; k++) {
      vacc += (int32_t) *in[k];
    }
    float vfpacc = (float) vacc * vscale;
    vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
    vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
    const int32_t vrndacc = (int32_t) lrintf(vfpacc);
    *output = (uint8_t) (vrndacc + voutput_zero_point);
  }
}
| 4,416
| 28.844595
| 98
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7x-minmax-fp32-scalar-lrintf-c4.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__scalar_lrintf_c4(
    size_t rows,
    size_t channels,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    uint8_t* output,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(rows != 0);
  assert(rows <= 7);
  assert(channels != 0);

  // Global average pooling over up to 7 rows, four channels at a time.
  // Row pointer k is redirected to the zero buffer when rows <= k, which is
  // exactly the generated chain of rows<2, rows<=2, rows<4, ... checks.
  const uint8_t* in[7];
  in[0] = input;
  for (size_t k = 1; k < 7; k++) {
    in[k] = (const uint8_t*) ((uintptr_t) in[k - 1] + input_stride);
    if (rows <= k) {
      in[k] = zero;
    }
  }

  const int32_t vinit_bias = params->fp32_scalar_lrintf.init_bias;
  const float vscale = params->fp32_scalar_lrintf.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
  const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;

  // Main loop: four channels per iteration.
  for (; channels >= 4; channels -= 4) {
    int32_t vacc[4] = { vinit_bias, vinit_bias, vinit_bias, vinit_bias };
    for (size_t k = 0; k < 7; k++) {
      for (size_t c = 0; c < 4; c++) {
        vacc[c] += (int32_t) in[k][c];
      }
      in[k] += 4;
    }
    for (size_t c = 0; c < 4; c++) {
      // Scale, clamp in float, round with lrintf, re-add the zero point.
      float vfpacc = (float) vacc[c] * vscale;
      vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
      vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
      const int32_t vrndacc = (int32_t) lrintf(vfpacc);
      output[c] = (uint8_t) (vrndacc + voutput_zero_point);
    }
    output += 4;
  }

  // Remainder: 1-3 channels processed one at a time.
  if XNN_UNLIKELY(channels != 0) {
    do {
      int32_t vacc = vinit_bias;
      for (size_t k = 0; k < 7; k++) {
        vacc += (int32_t) *in[k]++;
      }
      float vfpacc = (float) vacc * vscale;
      vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
      vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
      const int32_t vrndacc = (int32_t) lrintf(vfpacc);
      *output++ = (uint8_t) (vrndacc + voutput_zero_point);
    } while (--channels != 0);
  }
}
| 6,068
| 30.942105
| 98
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7x-minmax-fp32-sse2-c16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-sse2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__sse2_c16(
size_t rows,
size_t channels,
const uint8_t* input,
size_t input_stride,
const uint8_t* zero,
uint8_t* output,
const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
const uint8_t* i0 = input;
const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const __m128i vinit_bias = _mm_load_si128((const __m128i*) params->fp32_sse2.init_bias);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
const __m128i vzero = _mm_setzero_si128();
for (; channels >= 16; channels -= 16) {
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vi0x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i0 + 8));
i0 += 16;
const __m128i vxi0x01234567 = _mm_unpacklo_epi8(vi0x01234567, vzero);
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vxi0x89ABCDEF = _mm_unpacklo_epi8(vi0x89ABCDEF, vzero);
const __m128i vi1x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i1 + 8));
i1 += 16;
const __m128i vxi1x01234567 = _mm_unpacklo_epi8(vi1x01234567, vzero);
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vxi1x89ABCDEF = _mm_unpacklo_epi8(vi1x89ABCDEF, vzero);
const __m128i vi2x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i2 + 8));
i2 += 16;
__m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
const __m128i vxi2x01234567 = _mm_unpacklo_epi8(vi2x01234567, vzero);
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
__m128i vacc89ABCDEF = _mm_add_epi16(vxi0x89ABCDEF, vxi1x89ABCDEF);
const __m128i vxi2x89ABCDEF = _mm_unpacklo_epi8(vi2x89ABCDEF, vzero);
const __m128i vi3x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i3 + 8));
i3 += 16;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
const __m128i vxi3x01234567 = _mm_unpacklo_epi8(vi3x01234567, vzero);
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi2x89ABCDEF);
const __m128i vxi3x89ABCDEF = _mm_unpacklo_epi8(vi3x89ABCDEF, vzero);
const __m128i vi4x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i4 + 8));
i4 += 16;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
const __m128i vxi4x01234567 = _mm_unpacklo_epi8(vi4x01234567, vzero);
const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi3x89ABCDEF);
const __m128i vxi4x89ABCDEF = _mm_unpacklo_epi8(vi4x89ABCDEF, vzero);
const __m128i vi5x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i5 + 8));
i5 += 16;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
const __m128i vxi5x01234567 = _mm_unpacklo_epi8(vi5x01234567, vzero);
const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi4x89ABCDEF);
const __m128i vxi5x89ABCDEF = _mm_unpacklo_epi8(vi5x89ABCDEF, vzero);
const __m128i vi6x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i6 + 8));
i6 += 16;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
const __m128i vxi6x01234567 = _mm_unpacklo_epi8(vi6x01234567, vzero);
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi5x89ABCDEF);
const __m128i vxi6x89ABCDEF = _mm_unpacklo_epi8(vi6x89ABCDEF, vzero);
vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi6x89ABCDEF);
__m128i vacc0123 = _mm_unpacklo_epi16(vacc01234567, vzero);
__m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, vzero);
__m128i vacc89AB = _mm_unpacklo_epi16(vacc89ABCDEF, vzero);
__m128i vaccCDEF = _mm_unpackhi_epi16(vacc89ABCDEF, vzero);
vacc0123 = _mm_add_epi32(vacc0123, vinit_bias);
vacc4567 = _mm_add_epi32(vacc4567, vinit_bias);
vacc89AB = _mm_add_epi32(vacc89AB, vinit_bias);
vaccCDEF = _mm_add_epi32(vaccCDEF, vinit_bias);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vacc4567);
__m128 vfpacc89AB = _mm_cvtepi32_ps(vacc89AB);
__m128 vfpaccCDEF = _mm_cvtepi32_ps(vaccCDEF);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
vfpacc89AB = _mm_mul_ps(vfpacc89AB, vscale);
vfpaccCDEF = _mm_mul_ps(vfpaccCDEF, vscale);
vfpacc0123 = _mm_min_ps(vfpacc0123, voutput_max_less_zero_point);
vfpacc4567 = _mm_min_ps(vfpacc4567, voutput_max_less_zero_point);
vfpacc89AB = _mm_min_ps(vfpacc89AB, voutput_max_less_zero_point);
vfpaccCDEF = _mm_min_ps(vfpaccCDEF, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vfpacc0123);
vacc4567 = _mm_cvtps_epi32(vfpacc4567);
vacc89AB = _mm_cvtps_epi32(vfpacc89AB);
vaccCDEF = _mm_cvtps_epi32(vfpaccCDEF);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packus_epi16(vout01234567, vout89ABCDEF);
vout0123456789ABCDEF = _mm_max_epu8(vout0123456789ABCDEF, voutput_min);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(channels != 0) {
do {
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
i0 += 8;
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
i1 += 8;
const __m128i vxi0x01234567 = _mm_unpacklo_epi8(vi0x01234567, vzero);
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
i2 += 8;
const __m128i vxi1x01234567 = _mm_unpacklo_epi8(vi1x01234567, vzero);
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
i3 += 8;
__m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
const __m128i vxi2x01234567 = _mm_unpacklo_epi8(vi2x01234567, vzero);
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
i4 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
const __m128i vxi3x01234567 = _mm_unpacklo_epi8(vi3x01234567, vzero);
const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
i5 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
const __m128i vxi4x01234567 = _mm_unpacklo_epi8(vi4x01234567, vzero);
const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
i6 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
const __m128i vxi5x01234567 = _mm_unpacklo_epi8(vi5x01234567, vzero);
vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
const __m128i vxi6x01234567 = _mm_unpacklo_epi8(vi6x01234567, vzero);
vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
__m128i vacc0123 = _mm_unpacklo_epi16(vacc01234567, vzero);
__m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, vzero);
vacc0123 = _mm_add_epi32(vacc0123, vinit_bias);
vacc4567 = _mm_add_epi32(vacc4567, vinit_bias);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vacc4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
vfpacc0123 = _mm_min_ps(vfpacc0123, voutput_max_less_zero_point);
vfpacc4567 = _mm_min_ps(vfpacc4567, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vfpacc0123);
vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
if XNN_LIKELY(channels >= 8) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
channels -= 8;
} else {
if (channels & 4) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
uint32_t vout0123 = (uint32_t) _mm_cvtsi128_si32(vout0123456701234567);
if (channels & 2) {
unaligned_store_u16(output, (uint16_t) vout0123);
vout0123 >>= 16;
output += 2;
}
if (channels & 1) {
*output = (uint8_t) vout0123;
output += 1;
}
channels = 0;
}
} while (channels != 0);
}
}
| 10,151
| 39.608
| 106
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7x-minmax-fp32-sse2-c24.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-sse2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/unaligned.h>
// Single-pass ("unipass") global-average-pool microkernel for QU8 data, SSE2
// variant processing 24 channels per main-loop iteration.
//
// Sums up to 7 input rows per channel, then requantizes the sum to uint8 via
// an fp32 scale (the fp32 requantization scheme): widen -> +init_bias ->
// int32->float -> *scale -> clamp-above -> round -> +zero_point -> pack with
// saturation -> clamp-below.
//
//   rows          - number of valid input rows, 1..7 (asserted).
//   channels      - number of channels to reduce (non-zero).
//   input         - first row; successive rows are input_stride bytes apart.
//   zero          - zero-filled row substituted for the missing rows when
//                   rows < 7, so the 7-way sum below is unconditional.
//   output        - receives `channels` requantized uint8 results.
//   params        - precomputed fp32_sse2 requantization constants.
//
// XNN_OOB_READS: the remainder path always loads full 8-byte groups, so the
// kernel may read past the last channel; callers must keep those bytes mapped.
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__sse2_c24(
    size_t rows,
    size_t channels,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    uint8_t* output,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows != 0);
  assert(rows <= 7);
  assert(channels != 0);
  // Row pointers i0..i6; any pointer beyond the valid `rows` count is
  // redirected to the zero row, keeping the unconditional 7-row sum correct.
  const uint8_t* i0 = input;
  const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
  if XNN_UNPREDICTABLE(rows < 2) {
    i1 = zero;
  }
  const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 2) {
    i2 = zero;
  }
  const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
  if XNN_UNPREDICTABLE(rows < 4) {
    i3 = zero;
  }
  const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 4) {
    i4 = zero;
  }
  const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
  if XNN_UNPREDICTABLE(rows < 6) {
    i5 = zero;
  }
  const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 6) {
    i6 = zero;
  }
  // Requantization constants, prepared by the params-init routine.
  // NOTE(review): init_bias presumably folds the averaging offset/rounding
  // adjustment into the accumulator before scaling — set up outside this file.
  const __m128i vinit_bias = _mm_load_si128((const __m128i*) params->fp32_sse2.init_bias);
  const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
  const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
  const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
  const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
  const __m128i vzero = _mm_setzero_si128();
  // Main loop: 24 channels per iteration, handled as 3 groups of 8.
  // _mm_unpacklo_epi8(v, vzero) zero-extends 8 uint8 lanes to uint16; the
  // 7-row sum fits uint16 (max 7*255 = 1785).  Loads and adds for successive
  // rows are interleaved (software pipelining) in generator order — do not
  // reorder statements.
  for (; channels >= 24; channels -= 24) {
    const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
    const __m128i vi0x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i0 + 8));
    const __m128i vi0xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i0 + 16));
    i0 += 24;
    const __m128i vxi0x01234567 = _mm_unpacklo_epi8(vi0x01234567, vzero);
    const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
    const __m128i vxi0x89ABCDEF = _mm_unpacklo_epi8(vi0x89ABCDEF, vzero);
    const __m128i vi1x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i1 + 8));
    const __m128i vxi0xGHIJKLMN = _mm_unpacklo_epi8(vi0xGHIJKLMN, vzero);
    const __m128i vi1xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i1 + 16));
    i1 += 24;
    const __m128i vxi1x01234567 = _mm_unpacklo_epi8(vi1x01234567, vzero);
    const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
    const __m128i vxi1x89ABCDEF = _mm_unpacklo_epi8(vi1x89ABCDEF, vzero);
    const __m128i vi2x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i2 + 8));
    const __m128i vxi1xGHIJKLMN = _mm_unpacklo_epi8(vi1xGHIJKLMN, vzero);
    const __m128i vi2xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i2 + 16));
    i2 += 24;
    __m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
    const __m128i vxi2x01234567 = _mm_unpacklo_epi8(vi2x01234567, vzero);
    const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
    __m128i vacc89ABCDEF = _mm_add_epi16(vxi0x89ABCDEF, vxi1x89ABCDEF);
    const __m128i vxi2x89ABCDEF = _mm_unpacklo_epi8(vi2x89ABCDEF, vzero);
    const __m128i vi3x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i3 + 8));
    __m128i vaccGHIJKLMN = _mm_add_epi16(vxi0xGHIJKLMN, vxi1xGHIJKLMN);
    const __m128i vxi2xGHIJKLMN = _mm_unpacklo_epi8(vi2xGHIJKLMN, vzero);
    const __m128i vi3xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i3 + 16));
    i3 += 24;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
    const __m128i vxi3x01234567 = _mm_unpacklo_epi8(vi3x01234567, vzero);
    const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
    vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi2x89ABCDEF);
    const __m128i vxi3x89ABCDEF = _mm_unpacklo_epi8(vi3x89ABCDEF, vzero);
    const __m128i vi4x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i4 + 8));
    vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi2xGHIJKLMN);
    const __m128i vxi3xGHIJKLMN = _mm_unpacklo_epi8(vi3xGHIJKLMN, vzero);
    const __m128i vi4xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i4 + 16));
    i4 += 24;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
    const __m128i vxi4x01234567 = _mm_unpacklo_epi8(vi4x01234567, vzero);
    const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
    vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi3x89ABCDEF);
    const __m128i vxi4x89ABCDEF = _mm_unpacklo_epi8(vi4x89ABCDEF, vzero);
    const __m128i vi5x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i5 + 8));
    vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi3xGHIJKLMN);
    const __m128i vxi4xGHIJKLMN = _mm_unpacklo_epi8(vi4xGHIJKLMN, vzero);
    const __m128i vi5xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i5 + 16));
    i5 += 24;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
    const __m128i vxi5x01234567 = _mm_unpacklo_epi8(vi5x01234567, vzero);
    const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
    vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi4x89ABCDEF);
    const __m128i vxi5x89ABCDEF = _mm_unpacklo_epi8(vi5x89ABCDEF, vzero);
    const __m128i vi6x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i6 + 8));
    vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi4xGHIJKLMN);
    const __m128i vxi5xGHIJKLMN = _mm_unpacklo_epi8(vi5xGHIJKLMN, vzero);
    const __m128i vi6xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i6 + 16));
    i6 += 24;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
    const __m128i vxi6x01234567 = _mm_unpacklo_epi8(vi6x01234567, vzero);
    vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi5x89ABCDEF);
    const __m128i vxi6x89ABCDEF = _mm_unpacklo_epi8(vi6x89ABCDEF, vzero);
    vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi5xGHIJKLMN);
    const __m128i vxi6xGHIJKLMN = _mm_unpacklo_epi8(vi6xGHIJKLMN, vzero);
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
    vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi6x89ABCDEF);
    vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi6xGHIJKLMN);
    // Zero-extend the uint16 sums to int32 and add the init bias.
    __m128i vacc0123 = _mm_unpacklo_epi16(vacc01234567, vzero);
    __m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, vzero);
    __m128i vacc89AB = _mm_unpacklo_epi16(vacc89ABCDEF, vzero);
    __m128i vaccCDEF = _mm_unpackhi_epi16(vacc89ABCDEF, vzero);
    __m128i vaccGHIJ = _mm_unpacklo_epi16(vaccGHIJKLMN, vzero);
    __m128i vaccKLMN = _mm_unpackhi_epi16(vaccGHIJKLMN, vzero);
    vacc0123 = _mm_add_epi32(vacc0123, vinit_bias);
    vacc4567 = _mm_add_epi32(vacc4567, vinit_bias);
    vacc89AB = _mm_add_epi32(vacc89AB, vinit_bias);
    vaccCDEF = _mm_add_epi32(vaccCDEF, vinit_bias);
    vaccGHIJ = _mm_add_epi32(vaccGHIJ, vinit_bias);
    vaccKLMN = _mm_add_epi32(vaccKLMN, vinit_bias);
    // fp32 requantization: convert to float, scale, clamp the upper bound
    // (output_max with the zero point pre-subtracted), round back to int32.
    __m128 vfpacc0123 = _mm_cvtepi32_ps(vacc0123);
    __m128 vfpacc4567 = _mm_cvtepi32_ps(vacc4567);
    __m128 vfpacc89AB = _mm_cvtepi32_ps(vacc89AB);
    __m128 vfpaccCDEF = _mm_cvtepi32_ps(vaccCDEF);
    __m128 vfpaccGHIJ = _mm_cvtepi32_ps(vaccGHIJ);
    __m128 vfpaccKLMN = _mm_cvtepi32_ps(vaccKLMN);
    vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
    vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
    vfpacc89AB = _mm_mul_ps(vfpacc89AB, vscale);
    vfpaccCDEF = _mm_mul_ps(vfpaccCDEF, vscale);
    vfpaccGHIJ = _mm_mul_ps(vfpaccGHIJ, vscale);
    vfpaccKLMN = _mm_mul_ps(vfpaccKLMN, vscale);
    vfpacc0123 = _mm_min_ps(vfpacc0123, voutput_max_less_zero_point);
    vfpacc4567 = _mm_min_ps(vfpacc4567, voutput_max_less_zero_point);
    vfpacc89AB = _mm_min_ps(vfpacc89AB, voutput_max_less_zero_point);
    vfpaccCDEF = _mm_min_ps(vfpaccCDEF, voutput_max_less_zero_point);
    vfpaccGHIJ = _mm_min_ps(vfpaccGHIJ, voutput_max_less_zero_point);
    vfpaccKLMN = _mm_min_ps(vfpaccKLMN, voutput_max_less_zero_point);
    vacc0123 = _mm_cvtps_epi32(vfpacc0123);
    vacc4567 = _mm_cvtps_epi32(vfpacc4567);
    vacc89AB = _mm_cvtps_epi32(vfpacc89AB);
    vaccCDEF = _mm_cvtps_epi32(vfpaccCDEF);
    vaccGHIJ = _mm_cvtps_epi32(vfpaccGHIJ);
    vaccKLMN = _mm_cvtps_epi32(vfpaccKLMN);
    // Pack int32 -> int16 with signed saturation, add the output zero point
    // (saturating), pack to uint8 with unsigned saturation, then apply the
    // lower clamp.  The GHIJKLMN group packs against itself; only its low
    // 8 bytes are stored.
    __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
    __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
    __m128i voutGHIJKLMN = _mm_adds_epi16(_mm_packs_epi32(vaccGHIJ, vaccKLMN), voutput_zero_point);
    __m128i vout0123456789ABCDEF = _mm_packus_epi16(vout01234567, vout89ABCDEF);
    __m128i voutGHIJKLMNGHIJKLMN = _mm_packus_epi16(voutGHIJKLMN, voutGHIJKLMN);
    vout0123456789ABCDEF = _mm_max_epu8(vout0123456789ABCDEF, voutput_min);
    voutGHIJKLMNGHIJKLMN = _mm_max_epu8(voutGHIJKLMNGHIJKLMN, voutput_min);
    _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
    _mm_storel_epi64((__m128i*) (output + 16), voutGHIJKLMNGHIJKLMN);
    output += 24;
  }
  // Remainder: process 8 channels per iteration; the final group of < 8
  // channels reads a full 8 bytes (see XNN_OOB_READS) and stores only the
  // valid 4/2/1-byte pieces.
  if XNN_UNLIKELY(channels != 0) {
    do {
      const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
      i0 += 8;
      const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
      i1 += 8;
      const __m128i vxi0x01234567 = _mm_unpacklo_epi8(vi0x01234567, vzero);
      const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
      i2 += 8;
      const __m128i vxi1x01234567 = _mm_unpacklo_epi8(vi1x01234567, vzero);
      const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
      i3 += 8;
      __m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
      const __m128i vxi2x01234567 = _mm_unpacklo_epi8(vi2x01234567, vzero);
      const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
      i4 += 8;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
      const __m128i vxi3x01234567 = _mm_unpacklo_epi8(vi3x01234567, vzero);
      const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
      i5 += 8;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
      const __m128i vxi4x01234567 = _mm_unpacklo_epi8(vi4x01234567, vzero);
      const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
      i6 += 8;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
      const __m128i vxi5x01234567 = _mm_unpacklo_epi8(vi5x01234567, vzero);
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
      const __m128i vxi6x01234567 = _mm_unpacklo_epi8(vi6x01234567, vzero);
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
      // Same widen/bias/scale/clamp/round/pack sequence as the main loop,
      // for a single group of 8 channels.
      __m128i vacc0123 = _mm_unpacklo_epi16(vacc01234567, vzero);
      __m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, vzero);
      vacc0123 = _mm_add_epi32(vacc0123, vinit_bias);
      vacc4567 = _mm_add_epi32(vacc4567, vinit_bias);
      __m128 vfpacc0123 = _mm_cvtepi32_ps(vacc0123);
      __m128 vfpacc4567 = _mm_cvtepi32_ps(vacc4567);
      vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
      vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
      vfpacc0123 = _mm_min_ps(vfpacc0123, voutput_max_less_zero_point);
      vfpacc4567 = _mm_min_ps(vfpacc4567, voutput_max_less_zero_point);
      vacc0123 = _mm_cvtps_epi32(vfpacc0123);
      vacc4567 = _mm_cvtps_epi32(vfpacc4567);
      __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
      __m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
      vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
      if XNN_LIKELY(channels >= 8) {
        _mm_storel_epi64((__m128i*) output, vout0123456701234567);
        output += 8;
        channels -= 8;
      } else {
        // Tail store: emit 4, 2, then 1 byte(s) as indicated by the low bits
        // of `channels`, shifting consumed lanes out of the vector.
        if (channels & 4) {
          unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
          vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
          output += 4;
        }
        uint32_t vout0123 = (uint32_t) _mm_cvtsi128_si32(vout0123456701234567);
        if (channels & 2) {
          unaligned_store_u16(output, (uint16_t) vout0123);
          vout0123 >>= 16;
          output += 2;
        }
        if (channels & 1) {
          *output = (uint8_t) vout0123;
          output += 1;
        }
        channels = 0;
      }
    } while (channels != 0);
  }
}
| 12,589
| 43.020979
| 106
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7x-minmax-fp32-sse2-c8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-sse2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/unaligned.h>
// Single-pass global-average-pool microkernel for QU8 data, SSE2 variant
// processing 8 channels per main-loop iteration.
//
// Sums up to 7 input rows per channel and requantizes via an fp32 scale:
// widen -> +init_bias -> int32->float -> *scale -> clamp-above -> round ->
// +zero_point -> saturating pack -> clamp-below.
//
//   rows          - number of valid input rows, 1..7 (asserted).
//   channels      - number of channels to reduce (non-zero).
//   input         - first row; successive rows are input_stride bytes apart.
//   zero          - zero-filled row substituted for rows beyond `rows`.
//   output        - receives `channels` requantized uint8 results.
//   params        - precomputed fp32_sse2 requantization constants.
//
// XNN_OOB_READS: the remainder path loads a full 8-byte group even when fewer
// than 8 channels remain; those extra bytes must be addressable.
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__sse2_c8(
    size_t rows,
    size_t channels,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    uint8_t* output,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows != 0);
  assert(rows <= 7);
  assert(channels != 0);
  // Row pointers i0..i6; pointers past the valid `rows` count alias the zero
  // row so the unconditional 7-row sum stays correct.
  const uint8_t* i0 = input;
  const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
  if XNN_UNPREDICTABLE(rows < 2) {
    i1 = zero;
  }
  const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 2) {
    i2 = zero;
  }
  const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
  if XNN_UNPREDICTABLE(rows < 4) {
    i3 = zero;
  }
  const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 4) {
    i4 = zero;
  }
  const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
  if XNN_UNPREDICTABLE(rows < 6) {
    i5 = zero;
  }
  const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 6) {
    i6 = zero;
  }
  // Requantization constants, prepared by the params-init routine.
  // NOTE(review): init_bias presumably carries the averaging offset/rounding
  // adjustment folded in by the init code — defined outside this file.
  const __m128i vinit_bias = _mm_load_si128((const __m128i*) params->fp32_sse2.init_bias);
  const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
  const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
  const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
  const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
  const __m128i vzero = _mm_setzero_si128();
  // Main loop: 8 channels per iteration.  _mm_unpacklo_epi8(v, vzero)
  // zero-extends uint8 lanes to uint16; the 7-row sum fits uint16
  // (max 7*255 = 1785).  Statement order is the generator's pipeline schedule.
  for (; channels >= 8; channels -= 8) {
    const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
    i0 += 8;
    const __m128i vxi0x01234567 = _mm_unpacklo_epi8(vi0x01234567, vzero);
    const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
    i1 += 8;
    const __m128i vxi1x01234567 = _mm_unpacklo_epi8(vi1x01234567, vzero);
    const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
    i2 += 8;
    __m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
    const __m128i vxi2x01234567 = _mm_unpacklo_epi8(vi2x01234567, vzero);
    const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
    i3 += 8;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
    const __m128i vxi3x01234567 = _mm_unpacklo_epi8(vi3x01234567, vzero);
    const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
    i4 += 8;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
    const __m128i vxi4x01234567 = _mm_unpacklo_epi8(vi4x01234567, vzero);
    const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
    i5 += 8;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
    const __m128i vxi5x01234567 = _mm_unpacklo_epi8(vi5x01234567, vzero);
    const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
    i6 += 8;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
    const __m128i vxi6x01234567 = _mm_unpacklo_epi8(vi6x01234567, vzero);
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
    // Widen sums to int32, add init bias, requantize through fp32, and pack
    // back to uint8 with saturation plus the explicit lower clamp.
    __m128i vacc0123 = _mm_unpacklo_epi16(vacc01234567, vzero);
    __m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, vzero);
    vacc0123 = _mm_add_epi32(vacc0123, vinit_bias);
    vacc4567 = _mm_add_epi32(vacc4567, vinit_bias);
    __m128 vfpacc0123 = _mm_cvtepi32_ps(vacc0123);
    __m128 vfpacc4567 = _mm_cvtepi32_ps(vacc4567);
    vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
    vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
    vfpacc0123 = _mm_min_ps(vfpacc0123, voutput_max_less_zero_point);
    vfpacc4567 = _mm_min_ps(vfpacc4567, voutput_max_less_zero_point);
    vacc0123 = _mm_cvtps_epi32(vfpacc0123);
    vacc4567 = _mm_cvtps_epi32(vfpacc4567);
    __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
    __m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
    vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
    _mm_storel_epi64((__m128i*) output, vout0123456701234567);
    output += 8;
  }
  // Remainder: at this point 0 < channels < 8.  A full 8-byte group is
  // processed once (relying on XNN_OOB_READS) and only the valid 4/2/1-byte
  // pieces are stored.
  if XNN_UNLIKELY(channels != 0) {
    {
      const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
      i0 += 8;
      const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
      i1 += 8;
      const __m128i vxi0x01234567 = _mm_unpacklo_epi8(vi0x01234567, vzero);
      const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
      i2 += 8;
      const __m128i vxi1x01234567 = _mm_unpacklo_epi8(vi1x01234567, vzero);
      const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
      i3 += 8;
      __m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
      const __m128i vxi2x01234567 = _mm_unpacklo_epi8(vi2x01234567, vzero);
      const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
      i4 += 8;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
      const __m128i vxi3x01234567 = _mm_unpacklo_epi8(vi3x01234567, vzero);
      const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
      i5 += 8;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
      const __m128i vxi4x01234567 = _mm_unpacklo_epi8(vi4x01234567, vzero);
      const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
      i6 += 8;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
      const __m128i vxi5x01234567 = _mm_unpacklo_epi8(vi5x01234567, vzero);
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
      const __m128i vxi6x01234567 = _mm_unpacklo_epi8(vi6x01234567, vzero);
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
      __m128i vacc0123 = _mm_unpacklo_epi16(vacc01234567, vzero);
      __m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, vzero);
      vacc0123 = _mm_add_epi32(vacc0123, vinit_bias);
      vacc4567 = _mm_add_epi32(vacc4567, vinit_bias);
      __m128 vfpacc0123 = _mm_cvtepi32_ps(vacc0123);
      __m128 vfpacc4567 = _mm_cvtepi32_ps(vacc4567);
      vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
      vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
      vfpacc0123 = _mm_min_ps(vfpacc0123, voutput_max_less_zero_point);
      vfpacc4567 = _mm_min_ps(vfpacc4567, voutput_max_less_zero_point);
      vacc0123 = _mm_cvtps_epi32(vfpacc0123);
      vacc4567 = _mm_cvtps_epi32(vfpacc4567);
      __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
      __m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
      vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
      // Tail store: emit 4, 2, then 1 byte(s) according to the low bits of
      // `channels`, shifting consumed lanes out of the vector.
      if (channels & 4) {
        unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
        vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
        output += 4;
      }
      uint32_t vout0123 = (uint32_t) _mm_cvtsi128_si32(vout0123456701234567);
      if (channels & 2) {
        unaligned_store_u16(output, (uint16_t) vout0123);
        vout0123 >>= 16;
        output += 2;
      }
      if (channels & 1) {
        *output = (uint8_t) vout0123;
      }
    }
  }
}
| 7,666
| 35.684211
| 106
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7x-minmax-fp32-sse41-c16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-sse4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/unaligned.h>
// Single-pass global-average-pool microkernel for QU8 data, SSE4.1 variant
// processing 16 channels per main-loop iteration.
//
// Unlike the SSE2 variants, this one uses _mm_cvtepu8_epi16 (SSE4.1 PMOVZXBW)
// to zero-extend uint8 lanes directly, avoiding a separate zero vector for the
// row loads.  Requantization is the fp32 scheme: widen -> +init_bias ->
// int32->float -> *scale -> clamp-above -> round -> +zero_point ->
// saturating pack -> clamp-below.
//
//   rows          - number of valid input rows, 1..7 (asserted).
//   channels      - number of channels to reduce (non-zero).
//   input         - first row; successive rows are input_stride bytes apart.
//   zero          - zero-filled row substituted for rows beyond `rows`.
//   output        - receives `channels` requantized uint8 results.
//   params        - precomputed fp32_sse4 requantization constants.
//
// XNN_OOB_READS: the remainder path always loads full 8-byte groups, so reads
// may extend past the last channel; those bytes must be addressable.
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__sse41_c16(
    size_t rows,
    size_t channels,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    uint8_t* output,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows != 0);
  assert(rows <= 7);
  assert(channels != 0);
  // Row pointers i0..i6; pointers past the valid `rows` count alias the zero
  // row so the unconditional 7-row sum stays correct.
  const uint8_t* i0 = input;
  const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
  if XNN_UNPREDICTABLE(rows < 2) {
    i1 = zero;
  }
  const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 2) {
    i2 = zero;
  }
  const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
  if XNN_UNPREDICTABLE(rows < 4) {
    i3 = zero;
  }
  const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 4) {
    i4 = zero;
  }
  const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
  if XNN_UNPREDICTABLE(rows < 6) {
    i5 = zero;
  }
  const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 6) {
    i6 = zero;
  }
  // Requantization constants, prepared by the params-init routine.
  // NOTE(review): init_bias presumably carries the averaging offset/rounding
  // adjustment folded in by the init code — defined outside this file.
  const __m128i vinit_bias = _mm_load_si128((const __m128i*) params->fp32_sse4.init_bias);
  const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
  const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
  const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
  const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse4.output_min);
  // Main loop: 16 channels per iteration (2 groups of 8).  The 7-row uint16
  // sum cannot overflow (max 7*255 = 1785).  Statement order is the
  // generator's pipeline schedule — do not reorder.
  for (; channels >= 16; channels -= 16) {
    const __m128i vxi0x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i0));
    const __m128i vxi0x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i0 + 8)));
    i0 += 16;
    const __m128i vxi1x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i1));
    const __m128i vxi1x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i1 + 8)));
    i1 += 16;
    __m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
    const __m128i vxi2x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i2));
    __m128i vacc89ABCDEF = _mm_add_epi16(vxi0x89ABCDEF, vxi1x89ABCDEF);
    const __m128i vxi2x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i2 + 8)));
    i2 += 16;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
    const __m128i vxi3x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i3));
    vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi2x89ABCDEF);
    const __m128i vxi3x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i3 + 8)));
    i3 += 16;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
    const __m128i vxi4x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i4));
    vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi3x89ABCDEF);
    const __m128i vxi4x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i4 + 8)));
    i4 += 16;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
    const __m128i vxi5x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i5));
    vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi4x89ABCDEF);
    const __m128i vxi5x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i5 + 8)));
    i5 += 16;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
    const __m128i vxi6x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i6));
    vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi5x89ABCDEF);
    const __m128i vxi6x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i6 + 8)));
    i6 += 16;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
    vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi6x89ABCDEF);
    // Zero-extend the uint16 sums to int32 (PMOVZXWD for the low halves,
    // unpack-with-zero for the high halves), add the init bias, requantize.
    const __m128i vzero = _mm_setzero_si128();
    __m128i vacc0123 = _mm_cvtepu16_epi32(vacc01234567);
    __m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, vzero);
    __m128i vacc89AB = _mm_cvtepu16_epi32(vacc89ABCDEF);
    __m128i vaccCDEF = _mm_unpackhi_epi16(vacc89ABCDEF, vzero);
    vacc0123 = _mm_add_epi32(vacc0123, vinit_bias);
    vacc4567 = _mm_add_epi32(vacc4567, vinit_bias);
    vacc89AB = _mm_add_epi32(vacc89AB, vinit_bias);
    vaccCDEF = _mm_add_epi32(vaccCDEF, vinit_bias);
    __m128 vfpacc0123 = _mm_cvtepi32_ps(vacc0123);
    __m128 vfpacc4567 = _mm_cvtepi32_ps(vacc4567);
    __m128 vfpacc89AB = _mm_cvtepi32_ps(vacc89AB);
    __m128 vfpaccCDEF = _mm_cvtepi32_ps(vaccCDEF);
    vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
    vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
    vfpacc89AB = _mm_mul_ps(vfpacc89AB, vscale);
    vfpaccCDEF = _mm_mul_ps(vfpaccCDEF, vscale);
    vfpacc0123 = _mm_min_ps(vfpacc0123, voutput_max_less_zero_point);
    vfpacc4567 = _mm_min_ps(vfpacc4567, voutput_max_less_zero_point);
    vfpacc89AB = _mm_min_ps(vfpacc89AB, voutput_max_less_zero_point);
    vfpaccCDEF = _mm_min_ps(vfpaccCDEF, voutput_max_less_zero_point);
    vacc0123 = _mm_cvtps_epi32(vfpacc0123);
    vacc4567 = _mm_cvtps_epi32(vfpacc4567);
    vacc89AB = _mm_cvtps_epi32(vfpacc89AB);
    vaccCDEF = _mm_cvtps_epi32(vfpaccCDEF);
    // Pack int32 -> int16 with signed saturation, add the zero point
    // (saturating), pack to uint8 with unsigned saturation, lower-clamp, store.
    __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
    __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
    __m128i vout0123456789ABCDEF = _mm_packus_epi16(vout01234567, vout89ABCDEF);
    vout0123456789ABCDEF = _mm_max_epu8(vout0123456789ABCDEF, voutput_min);
    _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
    output += 16;
  }
  // Remainder: 8 channels per iteration; the final group of < 8 channels
  // reads a full 8 bytes (see XNN_OOB_READS) and stores only the valid
  // 4/2/1-byte pieces.
  if XNN_UNLIKELY(channels != 0) {
    do {
      const __m128i vxi0x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i0));
      i0 += 8;
      const __m128i vxi1x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i1));
      i1 += 8;
      __m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
      const __m128i vxi2x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i2));
      i2 += 8;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
      const __m128i vxi3x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i3));
      i3 += 8;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
      const __m128i vxi4x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i4));
      i4 += 8;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
      const __m128i vxi5x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i5));
      i5 += 8;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
      const __m128i vxi6x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i6));
      i6 += 8;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
      // Same widen/bias/scale/clamp/round/pack sequence as the main loop,
      // for a single group of 8 channels.
      __m128i vacc0123 = _mm_cvtepu16_epi32(vacc01234567);
      __m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, _mm_setzero_si128());
      vacc0123 = _mm_add_epi32(vacc0123, vinit_bias);
      vacc4567 = _mm_add_epi32(vacc4567, vinit_bias);
      __m128 vfpacc0123 = _mm_cvtepi32_ps(vacc0123);
      __m128 vfpacc4567 = _mm_cvtepi32_ps(vacc4567);
      vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
      vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
      vfpacc0123 = _mm_min_ps(vfpacc0123, voutput_max_less_zero_point);
      vfpacc4567 = _mm_min_ps(vfpacc4567, voutput_max_less_zero_point);
      vacc0123 = _mm_cvtps_epi32(vfpacc0123);
      vacc4567 = _mm_cvtps_epi32(vfpacc4567);
      __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
      __m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
      vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
      if XNN_LIKELY(channels >= 8) {
        _mm_storel_epi64((__m128i*) output, vout0123456701234567);
        output += 8;
        channels -= 8;
      } else {
        // Tail store: emit 4, 2, then 1 byte(s) per the low bits of
        // `channels`, using SSE4.1 extracts for the 2- and 1-byte pieces.
        if (channels & 4) {
          unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
          vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
          output += 4;
        }
        if (channels & 2) {
          unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
          vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
          output += 2;
        }
        if (channels & 1) {
          *output = (uint8_t) _mm_extract_epi8(vout0123456701234567, 0);
          output += 1;
        }
        channels = 0;
      }
    } while (channels != 0);
  }
}
| 9,020
| 41.154206
| 106
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7x-minmax-fp32-sse41-c24.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-sse4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/unaligned.h>
// Unipass QU8 global-average-pooling microkernel (SSE4.1, 24 channels/iter).
// Sums up to 7 input rows per channel, requantizes the sums with FP32
// arithmetic, and writes one uint8 output per channel.
//
// rows         - number of valid input rows (1..7); missing rows read `zero`
// channels     - number of channels to pool
// input        - pointer to the first input row
// input_stride - byte stride between consecutive input rows
// zero         - zero-filled row substituted for rows beyond `rows`
// output       - destination for `channels` uint8 results
// params       - precomputed requantization constants (fp32_sse4 variant)
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__sse41_c24(
    size_t rows,
    size_t channels,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    uint8_t* output,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows != 0);
  assert(rows <= 7);
  assert(channels != 0);

  // Set up 7 row pointers; rows past `rows` point at the zero buffer so the
  // accumulation below can unconditionally add 7 rows.
  const uint8_t* i0 = input;
  const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
  if XNN_UNPREDICTABLE(rows < 2) {
    i1 = zero;
  }
  const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 2) {
    i2 = zero;
  }
  const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
  if XNN_UNPREDICTABLE(rows < 4) {
    i3 = zero;
  }
  const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 4) {
    i4 = zero;
  }
  const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
  if XNN_UNPREDICTABLE(rows < 6) {
    i5 = zero;
  }
  const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 6) {
    i6 = zero;
  }

  // Requantization constants: accumulator init bias, FP32 scale, upper clamp
  // (output_max minus zero point, applied in float space), output zero point
  // (added after conversion back to int), and the uint8 lower clamp.
  const __m128i vinit_bias = _mm_load_si128((const __m128i*) params->fp32_sse4.init_bias);
  const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
  const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
  const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
  const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse4.output_min);

  // Main loop: 24 channels per iteration, as three groups of 8.
  for (; channels >= 24; channels -= 24) {
    // Widen each row's bytes to 16 bits and sum the 7 rows. 7 * 255 fits in
    // 16 bits, so the uint16 accumulators cannot overflow.
    const __m128i vxi0x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i0));
    const __m128i vxi0x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i0 + 8)));
    const __m128i vxi0xGHIJKLMN = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i0 + 16)));
    i0 += 24;
    const __m128i vxi1x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i1));
    const __m128i vxi1x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i1 + 8)));
    const __m128i vxi1xGHIJKLMN = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i1 + 16)));
    i1 += 24;
    __m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
    const __m128i vxi2x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i2));
    __m128i vacc89ABCDEF = _mm_add_epi16(vxi0x89ABCDEF, vxi1x89ABCDEF);
    const __m128i vxi2x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i2 + 8)));
    __m128i vaccGHIJKLMN = _mm_add_epi16(vxi0xGHIJKLMN, vxi1xGHIJKLMN);
    const __m128i vxi2xGHIJKLMN = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i2 + 16)));
    i2 += 24;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
    const __m128i vxi3x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i3));
    vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi2x89ABCDEF);
    const __m128i vxi3x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i3 + 8)));
    vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi2xGHIJKLMN);
    const __m128i vxi3xGHIJKLMN = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i3 + 16)));
    i3 += 24;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
    const __m128i vxi4x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i4));
    vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi3x89ABCDEF);
    const __m128i vxi4x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i4 + 8)));
    vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi3xGHIJKLMN);
    const __m128i vxi4xGHIJKLMN = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i4 + 16)));
    i4 += 24;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
    const __m128i vxi5x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i5));
    vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi4x89ABCDEF);
    const __m128i vxi5x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i5 + 8)));
    vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi4xGHIJKLMN);
    const __m128i vxi5xGHIJKLMN = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i5 + 16)));
    i5 += 24;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
    const __m128i vxi6x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i6));
    vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi5x89ABCDEF);
    const __m128i vxi6x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i6 + 8)));
    vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi5xGHIJKLMN);
    const __m128i vxi6xGHIJKLMN = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i6 + 16)));
    i6 += 24;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
    vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi6x89ABCDEF);
    vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi6xGHIJKLMN);

    // Widen 16-bit sums to 32 bits and add the precomputed init bias.
    const __m128i vzero = _mm_setzero_si128();
    __m128i vacc0123 = _mm_cvtepu16_epi32(vacc01234567);
    __m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, vzero);
    __m128i vacc89AB = _mm_cvtepu16_epi32(vacc89ABCDEF);
    __m128i vaccCDEF = _mm_unpackhi_epi16(vacc89ABCDEF, vzero);
    __m128i vaccGHIJ = _mm_cvtepu16_epi32(vaccGHIJKLMN);
    __m128i vaccKLMN = _mm_unpackhi_epi16(vaccGHIJKLMN, vzero);
    vacc0123 = _mm_add_epi32(vacc0123, vinit_bias);
    vacc4567 = _mm_add_epi32(vacc4567, vinit_bias);
    vacc89AB = _mm_add_epi32(vacc89AB, vinit_bias);
    vaccCDEF = _mm_add_epi32(vaccCDEF, vinit_bias);
    vaccGHIJ = _mm_add_epi32(vaccGHIJ, vinit_bias);
    vaccKLMN = _mm_add_epi32(vaccKLMN, vinit_bias);

    // FP32 requantization: convert to float, scale, clamp above, round back
    // to int32.
    __m128 vfpacc0123 = _mm_cvtepi32_ps(vacc0123);
    __m128 vfpacc4567 = _mm_cvtepi32_ps(vacc4567);
    __m128 vfpacc89AB = _mm_cvtepi32_ps(vacc89AB);
    __m128 vfpaccCDEF = _mm_cvtepi32_ps(vaccCDEF);
    __m128 vfpaccGHIJ = _mm_cvtepi32_ps(vaccGHIJ);
    __m128 vfpaccKLMN = _mm_cvtepi32_ps(vaccKLMN);
    vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
    vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
    vfpacc89AB = _mm_mul_ps(vfpacc89AB, vscale);
    vfpaccCDEF = _mm_mul_ps(vfpaccCDEF, vscale);
    vfpaccGHIJ = _mm_mul_ps(vfpaccGHIJ, vscale);
    vfpaccKLMN = _mm_mul_ps(vfpaccKLMN, vscale);
    vfpacc0123 = _mm_min_ps(vfpacc0123, voutput_max_less_zero_point);
    vfpacc4567 = _mm_min_ps(vfpacc4567, voutput_max_less_zero_point);
    vfpacc89AB = _mm_min_ps(vfpacc89AB, voutput_max_less_zero_point);
    vfpaccCDEF = _mm_min_ps(vfpaccCDEF, voutput_max_less_zero_point);
    vfpaccGHIJ = _mm_min_ps(vfpaccGHIJ, voutput_max_less_zero_point);
    vfpaccKLMN = _mm_min_ps(vfpaccKLMN, voutput_max_less_zero_point);
    vacc0123 = _mm_cvtps_epi32(vfpacc0123);
    vacc4567 = _mm_cvtps_epi32(vfpacc4567);
    vacc89AB = _mm_cvtps_epi32(vfpacc89AB);
    vaccCDEF = _mm_cvtps_epi32(vfpaccCDEF);
    vaccGHIJ = _mm_cvtps_epi32(vfpaccGHIJ);
    vaccKLMN = _mm_cvtps_epi32(vfpaccKLMN);

    // Pack to int16 with saturation, add the output zero point (saturating),
    // pack to uint8 with saturation, then apply the lower clamp.
    __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
    __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
    __m128i voutGHIJKLMN = _mm_adds_epi16(_mm_packs_epi32(vaccGHIJ, vaccKLMN), voutput_zero_point);
    __m128i vout0123456789ABCDEF = _mm_packus_epi16(vout01234567, vout89ABCDEF);
    __m128i voutGHIJKLMNGHIJKLMN = _mm_packus_epi16(voutGHIJKLMN, voutGHIJKLMN);
    vout0123456789ABCDEF = _mm_max_epu8(vout0123456789ABCDEF, voutput_min);
    voutGHIJKLMNGHIJKLMN = _mm_max_epu8(voutGHIJKLMNGHIJKLMN, voutput_min);

    // Store 16 + 8 output bytes.
    _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
    _mm_storel_epi64((__m128i*) (output + 16), voutGHIJKLMNGHIJKLMN);
    output += 24;
  }
  if XNN_UNLIKELY(channels != 0) {
    // Remainder: process 8 channels at a time (may over-read; the kernel is
    // annotated XNN_OOB_READS), with element-wise tail stores for the last
    // sub-8 group.
    do {
      const __m128i vxi0x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i0));
      i0 += 8;
      const __m128i vxi1x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i1));
      i1 += 8;
      __m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
      const __m128i vxi2x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i2));
      i2 += 8;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
      const __m128i vxi3x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i3));
      i3 += 8;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
      const __m128i vxi4x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i4));
      i4 += 8;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
      const __m128i vxi5x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i5));
      i5 += 8;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
      const __m128i vxi6x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i6));
      i6 += 8;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
      // Same widen / bias / scale / clamp / repack sequence as the main loop,
      // for a single group of 8 channels.
      __m128i vacc0123 = _mm_cvtepu16_epi32(vacc01234567);
      __m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, _mm_setzero_si128());
      vacc0123 = _mm_add_epi32(vacc0123, vinit_bias);
      vacc4567 = _mm_add_epi32(vacc4567, vinit_bias);
      __m128 vfpacc0123 = _mm_cvtepi32_ps(vacc0123);
      __m128 vfpacc4567 = _mm_cvtepi32_ps(vacc4567);
      vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
      vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
      vfpacc0123 = _mm_min_ps(vfpacc0123, voutput_max_less_zero_point);
      vfpacc4567 = _mm_min_ps(vfpacc4567, voutput_max_less_zero_point);
      vacc0123 = _mm_cvtps_epi32(vfpacc0123);
      vacc4567 = _mm_cvtps_epi32(vfpacc4567);
      __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
      __m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
      vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
      if XNN_LIKELY(channels >= 8) {
        _mm_storel_epi64((__m128i*) output, vout0123456701234567);
        output += 8;
        channels -= 8;
      } else {
        // Tail: store 4, 2, then 1 byte(s), shifting consumed lanes out.
        if (channels & 4) {
          unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
          vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
          output += 4;
        }
        if (channels & 2) {
          unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
          vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
          output += 2;
        }
        if (channels & 1) {
          *output = (uint8_t) _mm_extract_epi8(vout0123456701234567, 0);
          output += 1;
        }
        channels = 0;
      }
    } while (channels != 0);
  }
}
| 11,073
| 44.572016
| 106
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7x-minmax-fp32-sse41-c8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-sse4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/unaligned.h>
// Unipass QU8 global-average-pooling microkernel (SSE4.1, 8 channels/iter).
// Sums up to 7 input rows per channel, requantizes with FP32 arithmetic, and
// writes one uint8 output per channel.
//
// rows         - number of valid input rows (1..7); missing rows read `zero`
// channels     - number of channels to pool
// input        - pointer to the first input row
// input_stride - byte stride between consecutive input rows
// zero         - zero-filled row substituted for rows beyond `rows`
// output       - destination for `channels` uint8 results
// params       - precomputed requantization constants (fp32_sse4 variant)
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__sse41_c8(
    size_t rows,
    size_t channels,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    uint8_t* output,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows != 0);
  assert(rows <= 7);
  assert(channels != 0);

  // Set up 7 row pointers; rows past `rows` point at the zero buffer so the
  // accumulation below can unconditionally add 7 rows.
  const uint8_t* i0 = input;
  const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
  if XNN_UNPREDICTABLE(rows < 2) {
    i1 = zero;
  }
  const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 2) {
    i2 = zero;
  }
  const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
  if XNN_UNPREDICTABLE(rows < 4) {
    i3 = zero;
  }
  const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 4) {
    i4 = zero;
  }
  const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
  if XNN_UNPREDICTABLE(rows < 6) {
    i5 = zero;
  }
  const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 6) {
    i6 = zero;
  }

  // Requantization constants: accumulator init bias, FP32 scale, upper clamp
  // (output_max minus zero point, applied in float space), output zero point
  // (added after conversion back to int), and the uint8 lower clamp.
  const __m128i vinit_bias = _mm_load_si128((const __m128i*) params->fp32_sse4.init_bias);
  const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
  const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
  const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
  const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse4.output_min);

  // Main loop: 8 channels per iteration. Widen each row's bytes to 16 bits
  // and sum the 7 rows; 7 * 255 fits in 16 bits, so no overflow is possible.
  for (; channels >= 8; channels -= 8) {
    const __m128i vxi0x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i0));
    i0 += 8;
    const __m128i vxi1x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i1));
    i1 += 8;
    __m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
    const __m128i vxi2x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i2));
    i2 += 8;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
    const __m128i vxi3x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i3));
    i3 += 8;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
    const __m128i vxi4x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i4));
    i4 += 8;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
    const __m128i vxi5x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i5));
    i5 += 8;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
    const __m128i vxi6x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i6));
    i6 += 8;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);

    // Widen to 32 bits, add the init bias, then requantize in FP32:
    // scale, clamp above, round back to int32.
    const __m128i vzero = _mm_setzero_si128();
    __m128i vacc0123 = _mm_cvtepu16_epi32(vacc01234567);
    __m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, vzero);
    vacc0123 = _mm_add_epi32(vacc0123, vinit_bias);
    vacc4567 = _mm_add_epi32(vacc4567, vinit_bias);
    __m128 vfpacc0123 = _mm_cvtepi32_ps(vacc0123);
    __m128 vfpacc4567 = _mm_cvtepi32_ps(vacc4567);
    vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
    vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
    vfpacc0123 = _mm_min_ps(vfpacc0123, voutput_max_less_zero_point);
    vfpacc4567 = _mm_min_ps(vfpacc4567, voutput_max_less_zero_point);
    vacc0123 = _mm_cvtps_epi32(vfpacc0123);
    vacc4567 = _mm_cvtps_epi32(vfpacc4567);

    // Pack to int16 with saturation, add the output zero point (saturating),
    // pack to uint8 with saturation, apply the lower clamp, and store 8 bytes.
    __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
    __m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
    vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
    _mm_storel_epi64((__m128i*) output, vout0123456701234567);
    output += 8;
  }
  if XNN_UNLIKELY(channels != 0) {
    // Remainder: fewer than 8 channels left. One more full 8-wide pass
    // (over-reading past the end is permitted — XNN_OOB_READS), followed by
    // element-wise tail stores.
    {
      const __m128i vxi0x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i0));
      i0 += 8;
      const __m128i vxi1x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i1));
      i1 += 8;
      __m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
      const __m128i vxi2x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i2));
      i2 += 8;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
      const __m128i vxi3x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i3));
      i3 += 8;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
      const __m128i vxi4x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i4));
      i4 += 8;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
      const __m128i vxi5x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i5));
      i5 += 8;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
      const __m128i vxi6x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i6));
      i6 += 8;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
      __m128i vacc0123 = _mm_cvtepu16_epi32(vacc01234567);
      __m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, _mm_setzero_si128());
      vacc0123 = _mm_add_epi32(vacc0123, vinit_bias);
      vacc4567 = _mm_add_epi32(vacc4567, vinit_bias);
      __m128 vfpacc0123 = _mm_cvtepi32_ps(vacc0123);
      __m128 vfpacc4567 = _mm_cvtepi32_ps(vacc4567);
      vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
      vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
      vfpacc0123 = _mm_min_ps(vfpacc0123, voutput_max_less_zero_point);
      vfpacc4567 = _mm_min_ps(vfpacc4567, voutput_max_less_zero_point);
      vacc0123 = _mm_cvtps_epi32(vfpacc0123);
      vacc4567 = _mm_cvtps_epi32(vfpacc4567);
      __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
      __m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
      vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
      // Tail: store 4, 2, then 1 byte(s), shifting consumed lanes out.
      if (channels & 4) {
        unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
        vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
        output += 4;
      }
      if (channels & 2) {
        unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
        vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
        output += 2;
      }
      if (channels & 1) {
        *output = (uint8_t) _mm_extract_epi8(vout0123456701234567, 0);
      }
    }
  }
}
| 6,922
| 37.461111
| 106
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7x-minmax-fp32-wasmsimd-c16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gavgpool.h>
// Unipass QU8 global-average-pooling microkernel (WAsm SIMD, 16 channels/iter).
// Sums up to 7 input rows per channel and requantizes with FP32 arithmetic
// using the magic-bias float->int conversion trick.
//
// rows         - number of valid input rows (1..7); missing rows read `zero`
// channels     - number of channels to pool
// input        - pointer to the first input row
// input_stride - byte stride between consecutive input rows
// zero         - zero-filled row substituted for rows beyond `rows`
// output       - destination for `channels` uint8 results
// params       - precomputed requantization constants (fp32_wasmsimd variant)
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__wasmsimd_c16(
    size_t rows,
    size_t channels,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    uint8_t* output,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows != 0);
  assert(rows <= 7);
  assert(channels != 0);

  // Set up 7 row pointers; rows past `rows` point at the zero buffer so the
  // accumulation below can unconditionally add 7 rows.
  const uint8_t* i0 = input;
  const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
  if XNN_UNPREDICTABLE(rows < 2) {
    i1 = zero;
  }
  const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 2) {
    i2 = zero;
  }
  const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
  if XNN_UNPREDICTABLE(rows < 4) {
    i3 = zero;
  }
  const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 4) {
    i4 = zero;
  }
  const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
  if XNN_UNPREDICTABLE(rows < 6) {
    i5 = zero;
  }
  const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 6) {
    i6 = zero;
  }

  // Requantization constants: accumulator init bias, FP32 scale, and the
  // magic-bias constants used to convert floats to ints while folding in the
  // output zero point and lower clamp; voutput_max is the uint8 upper clamp.
  const v128_t vinit_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.init_bias);
  const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
  const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
  const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
  const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
  const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);

  // Main loop: 16 channels per iteration, as two groups of 8.
  for (; channels >= 16; channels -= 16) {
    // Widen each row's bytes to 16 bits and sum the 7 rows. 7 * 255 fits in
    // 16 bits, so the uint16 accumulators cannot overflow.
    const v128_t vxi0x01234567 = wasm_u16x8_load8x8(i0);
    const v128_t vxi0x89ABCDEF = wasm_u16x8_load8x8(i0 + 8);
    i0 += 16;
    const v128_t vxi1x01234567 = wasm_u16x8_load8x8(i1);
    const v128_t vxi1x89ABCDEF = wasm_u16x8_load8x8(i1 + 8);
    i1 += 16;
    v128_t vacc01234567 = wasm_i16x8_add(vxi0x01234567, vxi1x01234567);
    const v128_t vxi2x01234567 = wasm_u16x8_load8x8(i2);
    v128_t vacc89ABCDEF = wasm_i16x8_add(vxi0x89ABCDEF, vxi1x89ABCDEF);
    const v128_t vxi2x89ABCDEF = wasm_u16x8_load8x8(i2 + 8);
    i2 += 16;
    vacc01234567 = wasm_i16x8_add(vacc01234567, vxi2x01234567);
    const v128_t vxi3x01234567 = wasm_u16x8_load8x8(i3);
    vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi2x89ABCDEF);
    const v128_t vxi3x89ABCDEF = wasm_u16x8_load8x8(i3 + 8);
    i3 += 16;
    vacc01234567 = wasm_i16x8_add(vacc01234567, vxi3x01234567);
    const v128_t vxi4x01234567 = wasm_u16x8_load8x8(i4);
    vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi3x89ABCDEF);
    const v128_t vxi4x89ABCDEF = wasm_u16x8_load8x8(i4 + 8);
    i4 += 16;
    vacc01234567 = wasm_i16x8_add(vacc01234567, vxi4x01234567);
    const v128_t vxi5x01234567 = wasm_u16x8_load8x8(i5);
    vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi4x89ABCDEF);
    const v128_t vxi5x89ABCDEF = wasm_u16x8_load8x8(i5 + 8);
    i5 += 16;
    vacc01234567 = wasm_i16x8_add(vacc01234567, vxi5x01234567);
    const v128_t vxi6x01234567 = wasm_u16x8_load8x8(i6);
    vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi5x89ABCDEF);
    const v128_t vxi6x89ABCDEF = wasm_u16x8_load8x8(i6 + 8);
    i6 += 16;
    vacc01234567 = wasm_i16x8_add(vacc01234567, vxi6x01234567);
    vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi6x89ABCDEF);

    // Widen to 32 bits, add the init bias, and convert to float.
    v128_t vacc0123 = wasm_i32x4_add(vinit_bias, wasm_u32x4_extend_low_u16x8(vacc01234567));
    v128_t vacc4567 = wasm_i32x4_add(vinit_bias, wasm_u32x4_extend_high_u16x8(vacc01234567));
    v128_t vacc89AB = wasm_i32x4_add(vinit_bias, wasm_u32x4_extend_low_u16x8(vacc89ABCDEF));
    v128_t vaccCDEF = wasm_i32x4_add(vinit_bias, wasm_u32x4_extend_high_u16x8(vacc89ABCDEF));
    vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
    vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);
    vacc89AB = wasm_f32x4_convert_i32x4(vacc89AB);
    vaccCDEF = wasm_f32x4_convert_i32x4(vaccCDEF);
    vacc0123 = wasm_f32x4_mul(vacc0123, vscale);
    vacc4567 = wasm_f32x4_mul(vacc4567, vscale);
    vacc89AB = wasm_f32x4_mul(vacc89AB, vscale);
    vaccCDEF = wasm_f32x4_mul(vaccCDEF, vscale);

    // Magic-bias float->int conversion: adding vmagic_bias places the
    // rounded integer in the low mantissa bits; the i32 max applies the
    // lower clamp and the subtraction removes the bias while adding the
    // output zero point.
    vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
    vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);
    vacc89AB = wasm_f32x4_add(vacc89AB, vmagic_bias);
    vaccCDEF = wasm_f32x4_add(vaccCDEF, vmagic_bias);
    vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
    vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);
    vacc89AB = wasm_i32x4_max(vacc89AB, vmagic_min);
    vaccCDEF = wasm_i32x4_max(vaccCDEF, vmagic_min);
    vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
    vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);
    vacc89AB = wasm_i32x4_sub(vacc89AB, vmagic_bias_less_output_zero_point);
    vaccCDEF = wasm_i32x4_sub(vaccCDEF, vmagic_bias_less_output_zero_point);

    // Narrow to int16, then to uint8 (both saturating), apply the upper
    // clamp, and store 16 output bytes.
    v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
    v128_t vout89ABCDEF = wasm_i16x8_narrow_i32x4(vacc89AB, vaccCDEF);
    v128_t vout0123456789ABCDEF = wasm_u8x16_narrow_i16x8(vout01234567, vout89ABCDEF);
    vout0123456789ABCDEF = wasm_u8x16_min(vout0123456789ABCDEF, voutput_max);
    wasm_v128_store(output, vout0123456789ABCDEF);
    output += 16;
  }
  if XNN_UNLIKELY(channels != 0) {
    // Remainder: process 8 channels at a time (may over-read; the kernel is
    // annotated XNN_OOB_READS), with lane-wise tail stores for the last
    // sub-8 group.
    do {
      const v128_t vxi0x01234567 = wasm_u16x8_load8x8(i0);
      i0 += 8;
      const v128_t vxi1x01234567 = wasm_u16x8_load8x8(i1);
      i1 += 8;
      v128_t vacc01234567 = wasm_i16x8_add(vxi0x01234567, vxi1x01234567);
      const v128_t vxi2x01234567 = wasm_u16x8_load8x8(i2);
      i2 += 8;
      vacc01234567 = wasm_i16x8_add(vacc01234567, vxi2x01234567);
      const v128_t vxi3x01234567 = wasm_u16x8_load8x8(i3);
      i3 += 8;
      vacc01234567 = wasm_i16x8_add(vacc01234567, vxi3x01234567);
      const v128_t vxi4x01234567 = wasm_u16x8_load8x8(i4);
      i4 += 8;
      vacc01234567 = wasm_i16x8_add(vacc01234567, vxi4x01234567);
      const v128_t vxi5x01234567 = wasm_u16x8_load8x8(i5);
      i5 += 8;
      vacc01234567 = wasm_i16x8_add(vacc01234567, vxi5x01234567);
      const v128_t vxi6x01234567 = wasm_u16x8_load8x8(i6);
      i6 += 8;
      vacc01234567 = wasm_i16x8_add(vacc01234567, vxi6x01234567);
      // Same widen / bias / scale / magic-bias / narrow sequence as the main
      // loop, for a single group of 8 channels.
      v128_t vacc0123 = wasm_i32x4_add(vinit_bias, wasm_u32x4_extend_low_u16x8(vacc01234567));
      v128_t vacc4567 = wasm_i32x4_add(vinit_bias, wasm_u32x4_extend_high_u16x8(vacc01234567));
      vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
      vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);
      vacc0123 = wasm_f32x4_mul(vacc0123, vscale);
      vacc4567 = wasm_f32x4_mul(vacc4567, vscale);
      vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
      vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);
      vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
      vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);
      vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
      vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);
      const v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
      v128_t vout0123456701234567 = wasm_u8x16_narrow_i16x8(vout01234567, vout01234567);
      vout0123456701234567 = wasm_u8x16_min(vout0123456701234567, voutput_max);
      if XNN_LIKELY(channels >= 8) {
        wasm_v128_store64_lane(output, vout0123456701234567, 0);
        output += 8;
        channels -= 8;
      } else {
        // Tail: store 4, 2, then 1 byte(s), shifting consumed lanes out.
        if (channels & 4) {
          wasm_v128_store32_lane(output, vout0123456701234567, 0);
          vout0123456701234567 = wasm_u64x2_shr(vout0123456701234567, 32);
          output += 4;
        }
        if (channels & 2) {
          wasm_v128_store16_lane(output, vout0123456701234567, 0);
          vout0123456701234567 = wasm_u32x4_shr(vout0123456701234567, 16);
          output += 2;
        }
        if (channels & 1) {
          wasm_v128_store8_lane(output, vout0123456701234567, 0);
          output += 1;
        }
        channels = 0;
      }
    } while (channels != 0);
  }
}
| 8,490
| 39.051887
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7x-minmax-fp32-wasmsimd-c24.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gavgpool.h>
// Unipass QU8 global-average-pooling microkernel (WAsm SIMD, 24 channels/iter).
// Sums up to 7 input rows per channel and requantizes with FP32 arithmetic
// using the magic-bias float->int conversion trick.
//
// rows         - number of valid input rows (1..7); missing rows read `zero`
// channels     - number of channels to pool
// input        - pointer to the first input row
// input_stride - byte stride between consecutive input rows
// zero         - zero-filled row substituted for rows beyond `rows`
// output       - destination for `channels` uint8 results
// params       - precomputed requantization constants (fp32_wasmsimd variant)
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__wasmsimd_c24(
    size_t rows,
    size_t channels,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    uint8_t* output,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows != 0);
  assert(rows <= 7);
  assert(channels != 0);

  // Set up 7 row pointers; rows past `rows` point at the zero buffer so the
  // accumulation below can unconditionally add 7 rows.
  const uint8_t* i0 = input;
  const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
  if XNN_UNPREDICTABLE(rows < 2) {
    i1 = zero;
  }
  const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 2) {
    i2 = zero;
  }
  const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
  if XNN_UNPREDICTABLE(rows < 4) {
    i3 = zero;
  }
  const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 4) {
    i4 = zero;
  }
  const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
  if XNN_UNPREDICTABLE(rows < 6) {
    i5 = zero;
  }
  const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 6) {
    i6 = zero;
  }

  // Requantization constants: accumulator init bias, FP32 scale, and the
  // magic-bias constants used to convert floats to ints while folding in the
  // output zero point and lower clamp; voutput_max is the uint8 upper clamp.
  const v128_t vinit_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.init_bias)
  ;
  const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
  const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
  const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
  const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
  const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);

  // Main loop: 24 channels per iteration, as three groups of 8.
  for (; channels >= 24; channels -= 24) {
    // Widen each row's bytes to 16 bits and sum the 7 rows. 7 * 255 fits in
    // 16 bits, so the uint16 accumulators cannot overflow.
    const v128_t vxi0x01234567 = wasm_u16x8_load8x8(i0);
    const v128_t vxi0x89ABCDEF = wasm_u16x8_load8x8(i0 + 8);
    const v128_t vxi0xGHIJKLMN = wasm_u16x8_load8x8(i0 + 16);
    i0 += 24;
    const v128_t vxi1x01234567 = wasm_u16x8_load8x8(i1);
    const v128_t vxi1x89ABCDEF = wasm_u16x8_load8x8(i1 + 8);
    const v128_t vxi1xGHIJKLMN = wasm_u16x8_load8x8(i1 + 16);
    i1 += 24;
    v128_t vacc01234567 = wasm_i16x8_add(vxi0x01234567, vxi1x01234567);
    const v128_t vxi2x01234567 = wasm_u16x8_load8x8(i2);
    v128_t vacc89ABCDEF = wasm_i16x8_add(vxi0x89ABCDEF, vxi1x89ABCDEF);
    const v128_t vxi2x89ABCDEF = wasm_u16x8_load8x8(i2 + 8);
    v128_t vaccGHIJKLMN = wasm_i16x8_add(vxi0xGHIJKLMN, vxi1xGHIJKLMN);
    const v128_t vxi2xGHIJKLMN = wasm_u16x8_load8x8(i2 + 16);
    i2 += 24;
    vacc01234567 = wasm_i16x8_add(vacc01234567, vxi2x01234567);
    const v128_t vxi3x01234567 = wasm_u16x8_load8x8(i3);
    vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi2x89ABCDEF);
    const v128_t vxi3x89ABCDEF = wasm_u16x8_load8x8(i3 + 8);
    vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi2xGHIJKLMN);
    const v128_t vxi3xGHIJKLMN = wasm_u16x8_load8x8(i3 + 16);
    i3 += 24;
    vacc01234567 = wasm_i16x8_add(vacc01234567, vxi3x01234567);
    const v128_t vxi4x01234567 = wasm_u16x8_load8x8(i4);
    vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi3x89ABCDEF);
    const v128_t vxi4x89ABCDEF = wasm_u16x8_load8x8(i4 + 8);
    vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi3xGHIJKLMN);
    const v128_t vxi4xGHIJKLMN = wasm_u16x8_load8x8(i4 + 16);
    i4 += 24;
    vacc01234567 = wasm_i16x8_add(vacc01234567, vxi4x01234567);
    const v128_t vxi5x01234567 = wasm_u16x8_load8x8(i5);
    vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi4x89ABCDEF);
    const v128_t vxi5x89ABCDEF = wasm_u16x8_load8x8(i5 + 8);
    vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi4xGHIJKLMN);
    const v128_t vxi5xGHIJKLMN = wasm_u16x8_load8x8(i5 + 16);
    i5 += 24;
    vacc01234567 = wasm_i16x8_add(vacc01234567, vxi5x01234567);
    const v128_t vxi6x01234567 = wasm_u16x8_load8x8(i6);
    vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi5x89ABCDEF);
    const v128_t vxi6x89ABCDEF = wasm_u16x8_load8x8(i6 + 8);
    vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi5xGHIJKLMN);
    const v128_t vxi6xGHIJKLMN = wasm_u16x8_load8x8(i6 + 16);
    i6 += 24;
    vacc01234567 = wasm_i16x8_add(vacc01234567, vxi6x01234567);
    vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi6x89ABCDEF);
    vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi6xGHIJKLMN);

    // Widen to 32 bits, add the init bias, and convert to float.
    v128_t vacc0123 = wasm_i32x4_add(vinit_bias, wasm_u32x4_extend_low_u16x8(vacc01234567));
    v128_t vacc4567 = wasm_i32x4_add(vinit_bias, wasm_u32x4_extend_high_u16x8(vacc01234567));
    v128_t vacc89AB = wasm_i32x4_add(vinit_bias, wasm_u32x4_extend_low_u16x8(vacc89ABCDEF));
    v128_t vaccCDEF = wasm_i32x4_add(vinit_bias, wasm_u32x4_extend_high_u16x8(vacc89ABCDEF));
    v128_t vaccGHIJ = wasm_i32x4_add(vinit_bias, wasm_u32x4_extend_low_u16x8(vaccGHIJKLMN));
    v128_t vaccKLMN = wasm_i32x4_add(vinit_bias, wasm_u32x4_extend_high_u16x8(vaccGHIJKLMN));
    vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
    vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);
    vacc89AB = wasm_f32x4_convert_i32x4(vacc89AB);
    vaccCDEF = wasm_f32x4_convert_i32x4(vaccCDEF);
    vaccGHIJ = wasm_f32x4_convert_i32x4(vaccGHIJ);
    vaccKLMN = wasm_f32x4_convert_i32x4(vaccKLMN);
    vacc0123 = wasm_f32x4_mul(vacc0123, vscale);
    vacc4567 = wasm_f32x4_mul(vacc4567, vscale);
    vacc89AB = wasm_f32x4_mul(vacc89AB, vscale);
    vaccCDEF = wasm_f32x4_mul(vaccCDEF, vscale);
    vaccGHIJ = wasm_f32x4_mul(vaccGHIJ, vscale);
    vaccKLMN = wasm_f32x4_mul(vaccKLMN, vscale);

    // Magic-bias float->int conversion: adding vmagic_bias places the
    // rounded integer in the low mantissa bits; the i32 max applies the
    // lower clamp and the subtraction removes the bias while adding the
    // output zero point.
    vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
    vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);
    vacc89AB = wasm_f32x4_add(vacc89AB, vmagic_bias);
    vaccCDEF = wasm_f32x4_add(vaccCDEF, vmagic_bias);
    vaccGHIJ = wasm_f32x4_add(vaccGHIJ, vmagic_bias);
    vaccKLMN = wasm_f32x4_add(vaccKLMN, vmagic_bias);
    vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
    vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);
    vacc89AB = wasm_i32x4_max(vacc89AB, vmagic_min);
    vaccCDEF = wasm_i32x4_max(vaccCDEF, vmagic_min);
    vaccGHIJ = wasm_i32x4_max(vaccGHIJ, vmagic_min);
    vaccKLMN = wasm_i32x4_max(vaccKLMN, vmagic_min);
    vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
    vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);
    vacc89AB = wasm_i32x4_sub(vacc89AB, vmagic_bias_less_output_zero_point);
    vaccCDEF = wasm_i32x4_sub(vaccCDEF, vmagic_bias_less_output_zero_point);
    vaccGHIJ = wasm_i32x4_sub(vaccGHIJ, vmagic_bias_less_output_zero_point);
    vaccKLMN = wasm_i32x4_sub(vaccKLMN, vmagic_bias_less_output_zero_point);

    // Narrow to int16, then to uint8 (both saturating), apply the upper
    // clamp, and store 16 + 8 output bytes.
    v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
    v128_t vout89ABCDEF = wasm_i16x8_narrow_i32x4(vacc89AB, vaccCDEF);
    v128_t voutGHIJKLMN = wasm_i16x8_narrow_i32x4(vaccGHIJ, vaccKLMN);
    v128_t vout0123456789ABCDEF = wasm_u8x16_narrow_i16x8(vout01234567, vout89ABCDEF);
    v128_t voutGHIJKLMNGHIJKLMN = wasm_u8x16_narrow_i16x8(voutGHIJKLMN, voutGHIJKLMN);
    vout0123456789ABCDEF = wasm_u8x16_min(vout0123456789ABCDEF, voutput_max);
    voutGHIJKLMNGHIJKLMN = wasm_u8x16_min(voutGHIJKLMNGHIJKLMN, voutput_max);
    wasm_v128_store(output, vout0123456789ABCDEF);
    wasm_v128_store64_lane(output + 16, voutGHIJKLMNGHIJKLMN, 0);
    output += 24;
  }
  if XNN_UNLIKELY(channels != 0) {
    // Remainder: process 8 channels at a time (may over-read; the kernel is
    // annotated XNN_OOB_READS), with lane-wise tail stores for the last
    // sub-8 group.
    do {
      const v128_t vxi0x01234567 = wasm_u16x8_load8x8(i0);
      i0 += 8;
      const v128_t vxi1x01234567 = wasm_u16x8_load8x8(i1);
      i1 += 8;
      v128_t vacc01234567 = wasm_i16x8_add(vxi0x01234567, vxi1x01234567);
      const v128_t vxi2x01234567 = wasm_u16x8_load8x8(i2);
      i2 += 8;
      vacc01234567 = wasm_i16x8_add(vacc01234567, vxi2x01234567);
      const v128_t vxi3x01234567 = wasm_u16x8_load8x8(i3);
      i3 += 8;
      vacc01234567 = wasm_i16x8_add(vacc01234567, vxi3x01234567);
      const v128_t vxi4x01234567 = wasm_u16x8_load8x8(i4);
      i4 += 8;
      vacc01234567 = wasm_i16x8_add(vacc01234567, vxi4x01234567);
      const v128_t vxi5x01234567 = wasm_u16x8_load8x8(i5);
      i5 += 8;
      vacc01234567 = wasm_i16x8_add(vacc01234567, vxi5x01234567);
      const v128_t vxi6x01234567 = wasm_u16x8_load8x8(i6);
      i6 += 8;
      vacc01234567 = wasm_i16x8_add(vacc01234567, vxi6x01234567);
      // Same widen / bias / scale / magic-bias / narrow sequence as the main
      // loop, for a single group of 8 channels.
      v128_t vacc0123 = wasm_i32x4_add(vinit_bias, wasm_u32x4_extend_low_u16x8(vacc01234567));
      v128_t vacc4567 = wasm_i32x4_add(vinit_bias, wasm_u32x4_extend_high_u16x8(vacc01234567));
      vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
      vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);
      vacc0123 = wasm_f32x4_mul(vacc0123, vscale);
      vacc4567 = wasm_f32x4_mul(vacc4567, vscale);
      vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
      vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);
      vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
      vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);
      vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
      vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);
      const v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
      v128_t vout0123456701234567 = wasm_u8x16_narrow_i16x8(vout01234567, vout01234567);
      vout0123456701234567 = wasm_u8x16_min(vout0123456701234567, voutput_max);
      if XNN_LIKELY(channels >= 8) {
        wasm_v128_store64_lane(output, vout0123456701234567, 0);
        output += 8;
        channels -= 8;
      } else {
        // Tail: store 4, 2, then 1 byte(s), shifting consumed lanes out.
        if (channels & 4) {
          wasm_v128_store32_lane(output, vout0123456701234567, 0);
          vout0123456701234567 = wasm_u64x2_shr(vout0123456701234567, 32);
          output += 4;
        }
        if (channels & 2) {
          wasm_v128_store16_lane(output, vout0123456701234567, 0);
          vout0123456701234567 = wasm_u32x4_shr(vout0123456701234567, 16);
          output += 2;
        }
        if (channels & 1) {
          wasm_v128_store8_lane(output, vout0123456701234567, 0);
          output += 1;
        }
        channels = 0;
      }
    } while (channels != 0);
  }
}
| 10,373
| 42.045643
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7x-minmax-fp32-wasmsimd-c32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gavgpool.h>
// QU8 unipass global-average-pooling microkernel: averages up to 7 input rows
// per channel, 32 channels per main-loop iteration, using fp32-based
// requantization on WASM SIMD.
// Auto-generated from src/qs8-gavgpool/unipass-wasmsimd.c.in -- change the
// template, not this file.
//
// rows         - number of valid input rows (1..7); missing rows read `zero`.
// channels     - number of channels to reduce; must be non-zero.
// input        - first input row; consecutive rows are input_stride bytes apart.
// zero         - zero-filled row substituted for rows beyond `rows`.
// output       - receives `channels` requantized uint8 results.
// params       - fp32_wasmsimd requantization constants.
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__wasmsimd_c32(
size_t rows,
size_t channels,
const uint8_t* input,
size_t input_stride,
const uint8_t* zero,
uint8_t* output,
const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
// Row pointers i0..i6; any row index >= rows is redirected to the shared zero
// row so the loops below can unconditionally sum exactly 7 rows.
const uint8_t* i0 = input;
const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
// Broadcast the requantization constants once, outside the channel loops.
const v128_t vinit_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.init_bias);
const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
// Main loop: reduce 32 channels per iteration.
// Bytes are widened to 16-bit lanes on load; 7 rows of uint8 sum to at most
// 7*255 = 1785, which fits a uint16 lane without overflow.
for (; channels >= 32; channels -= 32) {
const v128_t vxi0x01234567 = wasm_u16x8_load8x8(i0);
const v128_t vxi0x89ABCDEF = wasm_u16x8_load8x8(i0 + 8);
const v128_t vxi0xGHIJKLMN = wasm_u16x8_load8x8(i0 + 16);
const v128_t vxi0xOPQRSTUV = wasm_u16x8_load8x8(i0 + 24);
i0 += 32;
const v128_t vxi1x01234567 = wasm_u16x8_load8x8(i1);
const v128_t vxi1x89ABCDEF = wasm_u16x8_load8x8(i1 + 8);
const v128_t vxi1xGHIJKLMN = wasm_u16x8_load8x8(i1 + 16);
const v128_t vxi1xOPQRSTUV = wasm_u16x8_load8x8(i1 + 24);
i1 += 32;
// Loads of the next row are interleaved with additions of the previous one
// to keep independent operations in flight.
v128_t vacc01234567 = wasm_i16x8_add(vxi0x01234567, vxi1x01234567);
const v128_t vxi2x01234567 = wasm_u16x8_load8x8(i2);
v128_t vacc89ABCDEF = wasm_i16x8_add(vxi0x89ABCDEF, vxi1x89ABCDEF);
const v128_t vxi2x89ABCDEF = wasm_u16x8_load8x8(i2 + 8);
v128_t vaccGHIJKLMN = wasm_i16x8_add(vxi0xGHIJKLMN, vxi1xGHIJKLMN);
const v128_t vxi2xGHIJKLMN = wasm_u16x8_load8x8(i2 + 16);
v128_t vaccOPQRSTUV = wasm_i16x8_add(vxi0xOPQRSTUV, vxi1xOPQRSTUV);
const v128_t vxi2xOPQRSTUV = wasm_u16x8_load8x8(i2 + 24);
i2 += 32;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi2x01234567);
const v128_t vxi3x01234567 = wasm_u16x8_load8x8(i3);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi2x89ABCDEF);
const v128_t vxi3x89ABCDEF = wasm_u16x8_load8x8(i3 + 8);
vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi2xGHIJKLMN);
const v128_t vxi3xGHIJKLMN = wasm_u16x8_load8x8(i3 + 16);
vaccOPQRSTUV = wasm_i16x8_add(vaccOPQRSTUV, vxi2xOPQRSTUV);
const v128_t vxi3xOPQRSTUV = wasm_u16x8_load8x8(i3 + 24);
i3 += 32;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi3x01234567);
const v128_t vxi4x01234567 = wasm_u16x8_load8x8(i4);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi3x89ABCDEF);
const v128_t vxi4x89ABCDEF = wasm_u16x8_load8x8(i4 + 8);
vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi3xGHIJKLMN);
const v128_t vxi4xGHIJKLMN = wasm_u16x8_load8x8(i4 + 16);
vaccOPQRSTUV = wasm_i16x8_add(vaccOPQRSTUV, vxi3xOPQRSTUV);
const v128_t vxi4xOPQRSTUV = wasm_u16x8_load8x8(i4 + 24);
i4 += 32;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi4x01234567);
const v128_t vxi5x01234567 = wasm_u16x8_load8x8(i5);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi4x89ABCDEF);
const v128_t vxi5x89ABCDEF = wasm_u16x8_load8x8(i5 + 8);
vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi4xGHIJKLMN);
const v128_t vxi5xGHIJKLMN = wasm_u16x8_load8x8(i5 + 16);
vaccOPQRSTUV = wasm_i16x8_add(vaccOPQRSTUV, vxi4xOPQRSTUV);
const v128_t vxi5xOPQRSTUV = wasm_u16x8_load8x8(i5 + 24);
i5 += 32;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi5x01234567);
const v128_t vxi6x01234567 = wasm_u16x8_load8x8(i6);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi5x89ABCDEF);
const v128_t vxi6x89ABCDEF = wasm_u16x8_load8x8(i6 + 8);
vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi5xGHIJKLMN);
const v128_t vxi6xGHIJKLMN = wasm_u16x8_load8x8(i6 + 16);
vaccOPQRSTUV = wasm_i16x8_add(vaccOPQRSTUV, vxi5xOPQRSTUV);
const v128_t vxi6xOPQRSTUV = wasm_u16x8_load8x8(i6 + 24);
i6 += 32;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi6x01234567);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi6x89ABCDEF);
vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi6xGHIJKLMN);
vaccOPQRSTUV = wasm_i16x8_add(vaccOPQRSTUV, vxi6xOPQRSTUV);
// Widen the 16-bit row sums to 32-bit lanes and add the init bias.
v128_t vacc0123 = wasm_i32x4_add(vinit_bias, wasm_u32x4_extend_low_u16x8(vacc01234567));
v128_t vacc4567 = wasm_i32x4_add(vinit_bias, wasm_u32x4_extend_high_u16x8(vacc01234567));
v128_t vacc89AB = wasm_i32x4_add(vinit_bias, wasm_u32x4_extend_low_u16x8(vacc89ABCDEF));
v128_t vaccCDEF = wasm_i32x4_add(vinit_bias, wasm_u32x4_extend_high_u16x8(vacc89ABCDEF));
v128_t vaccGHIJ = wasm_i32x4_add(vinit_bias, wasm_u32x4_extend_low_u16x8(vaccGHIJKLMN));
v128_t vaccKLMN = wasm_i32x4_add(vinit_bias, wasm_u32x4_extend_high_u16x8(vaccGHIJKLMN));
v128_t vaccOPQR = wasm_i32x4_add(vinit_bias, wasm_u32x4_extend_low_u16x8(vaccOPQRSTUV));
v128_t vaccSTUV = wasm_i32x4_add(vinit_bias, wasm_u32x4_extend_high_u16x8(vaccOPQRSTUV));
// fp32 requantization: convert to float and apply the averaging scale ...
vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);
vacc89AB = wasm_f32x4_convert_i32x4(vacc89AB);
vaccCDEF = wasm_f32x4_convert_i32x4(vaccCDEF);
vaccGHIJ = wasm_f32x4_convert_i32x4(vaccGHIJ);
vaccKLMN = wasm_f32x4_convert_i32x4(vaccKLMN);
vaccOPQR = wasm_f32x4_convert_i32x4(vaccOPQR);
vaccSTUV = wasm_f32x4_convert_i32x4(vaccSTUV);
vacc0123 = wasm_f32x4_mul(vacc0123, vscale);
vacc4567 = wasm_f32x4_mul(vacc4567, vscale);
vacc89AB = wasm_f32x4_mul(vacc89AB, vscale);
vaccCDEF = wasm_f32x4_mul(vaccCDEF, vscale);
vaccGHIJ = wasm_f32x4_mul(vaccGHIJ, vscale);
vaccKLMN = wasm_f32x4_mul(vaccKLMN, vscale);
vaccOPQR = wasm_f32x4_mul(vaccOPQR, vscale);
vaccSTUV = wasm_f32x4_mul(vaccSTUV, vscale);
// ... then round back to integer via the magic-bias trick: adding vmagic_bias
// leaves the rounded integer in the low bits; the i32 max against vmagic_min
// enforces the lower output bound; subtracting
// vmagic_bias_less_output_zero_point removes the bias and folds in the
// output zero point in one step.
vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);
vacc89AB = wasm_f32x4_add(vacc89AB, vmagic_bias);
vaccCDEF = wasm_f32x4_add(vaccCDEF, vmagic_bias);
vaccGHIJ = wasm_f32x4_add(vaccGHIJ, vmagic_bias);
vaccKLMN = wasm_f32x4_add(vaccKLMN, vmagic_bias);
vaccOPQR = wasm_f32x4_add(vaccOPQR, vmagic_bias);
vaccSTUV = wasm_f32x4_add(vaccSTUV, vmagic_bias);
vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);
vacc89AB = wasm_i32x4_max(vacc89AB, vmagic_min);
vaccCDEF = wasm_i32x4_max(vaccCDEF, vmagic_min);
vaccGHIJ = wasm_i32x4_max(vaccGHIJ, vmagic_min);
vaccKLMN = wasm_i32x4_max(vaccKLMN, vmagic_min);
vaccOPQR = wasm_i32x4_max(vaccOPQR, vmagic_min);
vaccSTUV = wasm_i32x4_max(vaccSTUV, vmagic_min);
vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);
vacc89AB = wasm_i32x4_sub(vacc89AB, vmagic_bias_less_output_zero_point);
vaccCDEF = wasm_i32x4_sub(vaccCDEF, vmagic_bias_less_output_zero_point);
vaccGHIJ = wasm_i32x4_sub(vaccGHIJ, vmagic_bias_less_output_zero_point);
vaccKLMN = wasm_i32x4_sub(vaccKLMN, vmagic_bias_less_output_zero_point);
vaccOPQR = wasm_i32x4_sub(vaccOPQR, vmagic_bias_less_output_zero_point);
vaccSTUV = wasm_i32x4_sub(vaccSTUV, vmagic_bias_less_output_zero_point);
// Saturating narrow i32 -> i16 -> u8, clamp to the upper bound, and store
// all 32 output bytes.
v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
v128_t vout89ABCDEF = wasm_i16x8_narrow_i32x4(vacc89AB, vaccCDEF);
v128_t voutGHIJKLMN = wasm_i16x8_narrow_i32x4(vaccGHIJ, vaccKLMN);
v128_t voutOPQRSTUV = wasm_i16x8_narrow_i32x4(vaccOPQR, vaccSTUV);
v128_t vout0123456789ABCDEF = wasm_u8x16_narrow_i16x8(vout01234567, vout89ABCDEF);
v128_t voutGHIJKLMNOPQRSTUV = wasm_u8x16_narrow_i16x8(voutGHIJKLMN, voutOPQRSTUV);
vout0123456789ABCDEF = wasm_u8x16_min(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMNOPQRSTUV = wasm_u8x16_min(voutGHIJKLMNOPQRSTUV, voutput_max);
wasm_v128_store(output, vout0123456789ABCDEF);
wasm_v128_store(output + 16, voutGHIJKLMNOPQRSTUV);
output += 32;
}
// Remainder: process leftover channels in groups of 8. Loads always read a
// full 8 bytes (possible over-read, declared via XNN_OOB_READS); the final
// partial group stores only 4/2/1 valid bytes via lane stores.
if XNN_UNLIKELY(channels != 0) {
do {
const v128_t vxi0x01234567 = wasm_u16x8_load8x8(i0);
i0 += 8;
const v128_t vxi1x01234567 = wasm_u16x8_load8x8(i1);
i1 += 8;
v128_t vacc01234567 = wasm_i16x8_add(vxi0x01234567, vxi1x01234567);
const v128_t vxi2x01234567 = wasm_u16x8_load8x8(i2);
i2 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi2x01234567);
const v128_t vxi3x01234567 = wasm_u16x8_load8x8(i3);
i3 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi3x01234567);
const v128_t vxi4x01234567 = wasm_u16x8_load8x8(i4);
i4 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi4x01234567);
const v128_t vxi5x01234567 = wasm_u16x8_load8x8(i5);
i5 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi5x01234567);
const v128_t vxi6x01234567 = wasm_u16x8_load8x8(i6);
i6 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi6x01234567);
// Same widen/scale/magic-bias requantization as the main loop, for 8 lanes.
v128_t vacc0123 = wasm_i32x4_add(vinit_bias, wasm_u32x4_extend_low_u16x8(vacc01234567));
v128_t vacc4567 = wasm_i32x4_add(vinit_bias, wasm_u32x4_extend_high_u16x8(vacc01234567));
vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);
vacc0123 = wasm_f32x4_mul(vacc0123, vscale);
vacc4567 = wasm_f32x4_mul(vacc4567, vscale);
vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);
vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);
vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);
const v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
v128_t vout0123456701234567 = wasm_u8x16_narrow_i16x8(vout01234567, vout01234567);
vout0123456701234567 = wasm_u8x16_min(vout0123456701234567, voutput_max);
if XNN_LIKELY(channels >= 8) {
wasm_v128_store64_lane(output, vout0123456701234567, 0);
output += 8;
channels -= 8;
} else {
// Fewer than 8 channels left: emit 4, 2, then 1 byte(s), shifting the
// consumed lanes out of the vector between stores.
if (channels & 4) {
wasm_v128_store32_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u64x2_shr(vout0123456701234567, 32);
output += 4;
}
if (channels & 2) {
wasm_v128_store16_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u32x4_shr(vout0123456701234567, 16);
output += 2;
}
if (channels & 1) {
wasm_v128_store8_lane(output, vout0123456701234567, 0);
output += 1;
}
channels = 0;
}
} while (channels != 0);
}
}
| 12,015
| 44.003745
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7x-minmax-fp32-wasmsimd-c8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gavgpool.h>
// QU8 unipass global-average-pooling microkernel: averages up to 7 input rows
// per channel, 8 channels per main-loop iteration, using fp32-based
// requantization on WASM SIMD.
// Auto-generated from src/qs8-gavgpool/unipass-wasmsimd.c.in -- change the
// template, not this file.
//
// rows         - number of valid input rows (1..7); missing rows read `zero`.
// channels     - number of channels to reduce; must be non-zero.
// input        - first input row; consecutive rows are input_stride bytes apart.
// zero         - zero-filled row substituted for rows beyond `rows`.
// output       - receives `channels` requantized uint8 results.
// params       - fp32_wasmsimd requantization constants.
void xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__wasmsimd_c8(
size_t rows,
size_t channels,
const uint8_t* input,
size_t input_stride,
const uint8_t* zero,
uint8_t* output,
const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
// Row pointers i0..i6; rows beyond `rows` are redirected to the zero row so
// the accumulation always sums exactly 7 rows.
const uint8_t* i0 = input;
const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
// Broadcast the requantization constants once, outside the channel loops.
const v128_t vinit_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.init_bias)
;
const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
// Main loop: 8 channels per iteration. Bytes widen to 16-bit lanes on load;
// 7 rows of uint8 sum to at most 7*255 = 1785, which fits a uint16 lane.
for (; channels >= 8; channels -= 8) {
const v128_t vxi0x01234567 = wasm_u16x8_load8x8(i0);
i0 += 8;
const v128_t vxi1x01234567 = wasm_u16x8_load8x8(i1);
i1 += 8;
v128_t vacc01234567 = wasm_i16x8_add(vxi0x01234567, vxi1x01234567);
const v128_t vxi2x01234567 = wasm_u16x8_load8x8(i2);
i2 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi2x01234567);
const v128_t vxi3x01234567 = wasm_u16x8_load8x8(i3);
i3 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi3x01234567);
const v128_t vxi4x01234567 = wasm_u16x8_load8x8(i4);
i4 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi4x01234567);
const v128_t vxi5x01234567 = wasm_u16x8_load8x8(i5);
i5 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi5x01234567);
const v128_t vxi6x01234567 = wasm_u16x8_load8x8(i6);
i6 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi6x01234567);
// Widen sums to 32-bit, add init bias, then requantize in fp32: scale,
// add vmagic_bias to round to integer, clamp below via vmagic_min, and
// subtract vmagic_bias_less_output_zero_point to recover the zero-point
// adjusted integer result.
v128_t vacc0123 = wasm_i32x4_add(vinit_bias, wasm_u32x4_extend_low_u16x8(vacc01234567));
v128_t vacc4567 = wasm_i32x4_add(vinit_bias, wasm_u32x4_extend_high_u16x8(vacc01234567));
vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);
vacc0123 = wasm_f32x4_mul(vacc0123, vscale);
vacc4567 = wasm_f32x4_mul(vacc4567, vscale);
vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);
vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);
vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);
// Saturating narrow to u8, clamp to the upper bound, store 8 bytes.
v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
v128_t vout0123456701234567 = wasm_u8x16_narrow_i16x8(vout01234567, vout01234567);
vout0123456701234567 = wasm_u8x16_min(vout0123456701234567, voutput_max);
wasm_v128_store64_lane(output, vout0123456701234567, 0);
output += 8;
}
// Remainder (1..7 channels): one more full 8-lane pass (loads may over-read;
// declared via XNN_OOB_READS), then store only the valid 4/2/1 bytes.
if XNN_UNLIKELY(channels != 0) {
{
const v128_t vxi0x01234567 = wasm_u16x8_load8x8(i0);
i0 += 8;
const v128_t vxi1x01234567 = wasm_u16x8_load8x8(i1);
i1 += 8;
v128_t vacc01234567 = wasm_i16x8_add(vxi0x01234567, vxi1x01234567);
const v128_t vxi2x01234567 = wasm_u16x8_load8x8(i2);
i2 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi2x01234567);
const v128_t vxi3x01234567 = wasm_u16x8_load8x8(i3);
i3 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi3x01234567);
const v128_t vxi4x01234567 = wasm_u16x8_load8x8(i4);
i4 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi4x01234567);
const v128_t vxi5x01234567 = wasm_u16x8_load8x8(i5);
i5 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi5x01234567);
const v128_t vxi6x01234567 = wasm_u16x8_load8x8(i6);
i6 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi6x01234567);
v128_t vacc0123 = wasm_i32x4_add(vinit_bias, wasm_u32x4_extend_low_u16x8(vacc01234567));
v128_t vacc4567 = wasm_i32x4_add(vinit_bias, wasm_u32x4_extend_high_u16x8(vacc01234567));
vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);
vacc0123 = wasm_f32x4_mul(vacc0123, vscale);
vacc4567 = wasm_f32x4_mul(vacc4567, vscale);
vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);
vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);
vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);
const v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
v128_t vout0123456701234567 = wasm_u8x16_narrow_i16x8(vout01234567, vout01234567);
vout0123456701234567 = wasm_u8x16_min(vout0123456701234567, voutput_max);
// Emit 4, 2, then 1 byte(s); shift consumed lanes out between stores.
if (channels & 4) {
wasm_v128_store32_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u64x2_shr(vout0123456701234567, 32);
output += 4;
}
if (channels & 2) {
wasm_v128_store16_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u32x4_shr(vout0123456701234567, 16);
output += 2;
}
if (channels & 1) {
wasm_v128_store8_lane(output, vout0123456701234567, 0);
}
}
}
}
| 6,578
| 35.960674
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7x-minmax-rndnu-neon-c16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gavgpool.h>
// QU8 unipass global-average-pooling microkernel: averages up to 7 input rows
// per channel, 16 channels per main-loop iteration, using rndnu fixed-point
// requantization on ARM NEON.
// Auto-generated from src/qs8-gavgpool/unipass-neon.c.in -- change the
// template, not this file.
//
// rows         - number of valid input rows (1..7); missing rows read `zero`.
// channels     - number of channels to reduce; must be non-zero.
// input        - first input row; consecutive rows are input_stride bytes apart.
// zero         - zero-filled row substituted for rows beyond `rows`.
// output       - receives `channels` requantized uint8 results.
// params       - rndnu_neon requantization constants (shifts, multiplier, clamps).
void xnn_qu8_gavgpool_minmax_rndnu_ukernel_7x__neon_c16(
size_t rows,
size_t channels,
const uint8_t* input,
size_t input_stride,
const uint8_t* zero,
uint8_t* output,
const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
// Row pointers i0..i6; rows beyond `rows` are redirected to the zero row so
// the accumulation always sums exactly 7 rows.
const uint8_t* i0 = input;
const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
// Broadcast the rndnu requantization constants once, outside the loops.
const int32x4_t vinit_bias = vld1q_dup_s32(&params->rndnu_neon.init_bias);
const int32x4_t vleft_pre_shift = vld1q_dup_s32(&params->rndnu_neon.left_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
const int32x4_t vleft_post_shift = vld1q_dup_s32(&params->rndnu_neon.left_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
const uint8x16_t voutput_min = vld1q_dup_u8(&params->rndnu_neon.output_min);
const uint8x16_t voutput_max = vld1q_dup_u8(&params->rndnu_neon.output_max);
// Main loop: 16 channels per iteration. Bytes widen to uint16 via
// vaddl/vaddw; 7 rows of uint8 sum to at most 7*255 = 1785, well within
// uint16 range.
for (; channels >= 16; channels -= 16) {
const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
const uint8x8_t vi0x89ABCDEF = vld1_u8(i0); i0 += 8;
const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
const uint8x8_t vi1x89ABCDEF = vld1_u8(i1); i1 += 8;
const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
const uint8x8_t vi2x89ABCDEF = vld1_u8(i2); i2 += 8;
uint16x8_t vsum89ABCDEF = vaddl_u8(vi0x89ABCDEF, vi1x89ABCDEF);
const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
const uint8x8_t vi3x89ABCDEF = vld1_u8(i3); i3 += 8;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi2x89ABCDEF);
const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
const uint8x8_t vi4x89ABCDEF = vld1_u8(i4); i4 += 8;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi3x89ABCDEF);
const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
const uint8x8_t vi5x89ABCDEF = vld1_u8(i5); i5 += 8;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi4x89ABCDEF);
const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
const uint8x8_t vi6x89ABCDEF = vld1_u8(i6); i6 += 8;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi5x89ABCDEF);
vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi6x89ABCDEF);
// Widen the uint16 sums to 32-bit and add the signed init bias; the
// reinterpret casts let the unsigned widening add (vaddw_u16) operate on
// the signed accumulator registers.
int32x4_t vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum01234567)));
int32x4_t vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum01234567)));
int32x4_t vacc89AB = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum89ABCDEF)));
int32x4_t vaccCDEF = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum89ABCDEF)));
// rndnu requantization: saturating pre-shift, doubling high-half multiply,
// then rounding post-shift.
vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
vacc89AB = vqshlq_s32(vacc89AB, vleft_pre_shift);
vaccCDEF = vqshlq_s32(vaccCDEF, vleft_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc89AB = vqdmulhq_s32(vacc89AB, vmultiplier);
vaccCDEF = vqdmulhq_s32(vaccCDEF, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
vacc89AB = vrshlq_s32(vacc89AB, vleft_post_shift);
vaccCDEF = vrshlq_s32(vaccCDEF, vleft_post_shift);
// Saturating narrow to i16 (AArch64 has fused high-half narrowing).
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
#endif // !XNN_ARCH_ARM64
// Add the output zero point (saturating), narrow to unsigned bytes, and
// clamp to [output_min, output_max] before storing 16 bytes.
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
#if XNN_ARCH_ARM64
uint8x16_t vout0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc01234567), vacc89ABCDEF);
#else // !XNN_ARCH_ARM64
uint8x16_t vout0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc01234567), vqmovun_s16(vacc89ABCDEF));
#endif // !XNN_ARCH_ARM64
vout0123456789ABCDEF = vmaxq_u8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = vminq_u8(vout0123456789ABCDEF, voutput_max);
vst1q_u8(output, vout0123456789ABCDEF); output += 16;
}
// Remainder: process leftover channels 8 at a time (loads may over-read;
// declared via XNN_OOB_READS); the final partial group stores 4/2/1 bytes
// via lane stores, rotating consumed bytes out with vext.
if XNN_UNLIKELY(channels != 0) {
do {
const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
// Same widen + rndnu requantization sequence as the main loop, 8 lanes.
int32x4_t vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum01234567)));
int32x4_t vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum01234567)));
vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
vout01234567 = vmax_u8(vout01234567, vget_low_u8(voutput_min));
vout01234567 = vmin_u8(vout01234567, vget_low_u8(voutput_max));
if XNN_LIKELY(channels >= 8) {
vst1_u8(output, vout01234567); output += 8;
channels -= 8;
} else {
// Fewer than 8 channels left: emit 4, 2, then 1 byte(s).
if (channels & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
vout01234567 = vext_u8(vout01234567, vout01234567, 4);
}
if (channels & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
vout01234567 = vext_u8(vout01234567, vout01234567, 2);
}
if (channels & 1) {
vst1_lane_u8(output, vout01234567, 0); output += 1;
}
channels = 0;
}
} while (channels != 0);
}
}
| 8,415
| 41.938776
| 124
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7x-minmax-rndnu-neon-c24.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gavgpool.h>
void xnn_qu8_gavgpool_minmax_rndnu_ukernel_7x__neon_c24(
size_t rows,
size_t channels,
const uint8_t* input,
size_t input_stride,
const uint8_t* zero,
uint8_t* output,
const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
const uint8_t* i0 = input;
const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const int32x4_t vinit_bias = vld1q_dup_s32(¶ms->rndnu_neon.init_bias);
const int32x4_t vleft_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.left_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vleft_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.left_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
const uint8x16_t voutput_min = vld1q_dup_u8(¶ms->rndnu_neon.output_min);
const uint8x16_t voutput_max = vld1q_dup_u8(¶ms->rndnu_neon.output_max);
for (; channels >= 24; channels -= 24) {
const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
const uint8x8_t vi0x89ABCDEF = vld1_u8(i0); i0 += 8;
const uint8x8_t vi0xGHIJKLMN = vld1_u8(i0); i0 += 8;
const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
const uint8x8_t vi1x89ABCDEF = vld1_u8(i1); i1 += 8;
const uint8x8_t vi1xGHIJKLMN = vld1_u8(i1); i1 += 8;
const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
const uint8x8_t vi2x89ABCDEF = vld1_u8(i2); i2 += 8;
uint16x8_t vsum89ABCDEF = vaddl_u8(vi0x89ABCDEF, vi1x89ABCDEF);
const uint8x8_t vi2xGHIJKLMN = vld1_u8(i2); i2 += 8;
uint16x8_t vsumGHIJKLMN = vaddl_u8(vi0xGHIJKLMN, vi1xGHIJKLMN);
const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
const uint8x8_t vi3x89ABCDEF = vld1_u8(i3); i3 += 8;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi2x89ABCDEF);
const uint8x8_t vi3xGHIJKLMN = vld1_u8(i3); i3 += 8;
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi2xGHIJKLMN);
const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
const uint8x8_t vi4x89ABCDEF = vld1_u8(i4); i4 += 8;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi3x89ABCDEF);
const uint8x8_t vi4xGHIJKLMN = vld1_u8(i4); i4 += 8;
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi3xGHIJKLMN);
const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
const uint8x8_t vi5x89ABCDEF = vld1_u8(i5); i5 += 8;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi4x89ABCDEF);
const uint8x8_t vi5xGHIJKLMN = vld1_u8(i5); i5 += 8;
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi4xGHIJKLMN);
const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
const uint8x8_t vi6x89ABCDEF = vld1_u8(i6); i6 += 8;
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi5x89ABCDEF);
const uint8x8_t vi6xGHIJKLMN = vld1_u8(i6); i6 += 8;
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi5xGHIJKLMN);
vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi6x89ABCDEF);
vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi6xGHIJKLMN);
int32x4_t vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum01234567)));
int32x4_t vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum01234567)));
int32x4_t vacc89AB = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum89ABCDEF)));
int32x4_t vaccCDEF = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum89ABCDEF)));
int32x4_t vaccGHIJ = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsumGHIJKLMN)));
int32x4_t vaccKLMN = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsumGHIJKLMN)));
vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
vacc89AB = vqshlq_s32(vacc89AB, vleft_pre_shift);
vaccCDEF = vqshlq_s32(vaccCDEF, vleft_pre_shift);
vaccGHIJ = vqshlq_s32(vaccGHIJ, vleft_pre_shift);
vaccKLMN = vqshlq_s32(vaccKLMN, vleft_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc89AB = vqdmulhq_s32(vacc89AB, vmultiplier);
vaccCDEF = vqdmulhq_s32(vaccCDEF, vmultiplier);
vaccGHIJ = vqdmulhq_s32(vaccGHIJ, vmultiplier);
vaccKLMN = vqdmulhq_s32(vaccKLMN, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
vacc89AB = vrshlq_s32(vacc89AB, vleft_post_shift);
vaccCDEF = vrshlq_s32(vaccCDEF, vleft_post_shift);
vaccGHIJ = vrshlq_s32(vaccGHIJ, vleft_post_shift);
vaccKLMN = vrshlq_s32(vaccKLMN, vleft_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
int16x8_t vaccGHIJKLMN = vqmovn_high_s32(vqmovn_s32(vaccGHIJ), vaccKLMN);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
int16x8_t vaccGHIJKLMN = vcombine_s16(vqmovn_s32(vaccGHIJ), vqmovn_s32(vaccKLMN));
#endif // !XNN_ARCH_ARM64
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
vaccGHIJKLMN = vqaddq_s16(vaccGHIJKLMN, voutput_zero_point);
#if XNN_ARCH_ARM64
uint8x16_t vout0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc01234567), vacc89ABCDEF);
uint8x8_t voutGHIJKLMN = vqmovun_s16(vaccGHIJKLMN);
#else // !XNN_ARCH_ARM64
uint8x16_t vout0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc01234567), vqmovun_s16(vacc89ABCDEF));
uint8x8_t voutGHIJKLMN = vqmovun_s16(vaccGHIJKLMN);
#endif // !XNN_ARCH_ARM64
vout0123456789ABCDEF = vmaxq_u8(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMN = vmax_u8(voutGHIJKLMN, vget_low_u8(voutput_min));
vout0123456789ABCDEF = vminq_u8(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMN = vmin_u8(voutGHIJKLMN, vget_low_u8(voutput_max));
vst1q_u8(output, vout0123456789ABCDEF); output += 16;
vst1_u8(output, voutGHIJKLMN); output += 8;
}
if XNN_UNLIKELY(channels != 0) {
do {
const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
int32x4_t vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum01234567)));
int32x4_t vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum01234567)));
vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
vout01234567 = vmax_u8(vout01234567, vget_low_u8(voutput_min));
vout01234567 = vmin_u8(vout01234567, vget_low_u8(voutput_max));
if XNN_LIKELY(channels >= 8) {
vst1_u8(output, vout01234567); output += 8;
channels -= 8;
} else {
if (channels & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
vout01234567 = vext_u8(vout01234567, vout01234567, 4);
}
if (channels & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
vout01234567 = vext_u8(vout01234567, vout01234567, 2);
}
if (channels & 1) {
vst1_lane_u8(output, vout01234567, 0); output += 1;
}
channels = 0;
}
} while (channels != 0);
}
}
| 10,268
| 44.64
| 124
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7x-minmax-rndnu-neon-c32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gavgpool.h>
// Unipass QU8 global average pooling microkernel (NEON, 32 channels per
// tile, rndnu requantization): sums up to 7 input rows per channel and
// requantizes the accumulated sum back to uint8 in a single pass.
//
// rows         - number of valid input rows, 1..7 (asserted below)
// channels     - number of channels to pool (elements per row)
// input        - pointer to the first input row
// input_stride - byte stride between consecutive input rows
// zero         - pointer to a zero-filled row; substituted for the row
//                pointers beyond 'rows' so 7 rows are always read
// output       - destination buffer, one uint8 per channel
// params       - requantization constants (rndnu_neon variant)
void xnn_qu8_gavgpool_minmax_rndnu_ukernel_7x__neon_c32(
    size_t rows,
    size_t channels,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    uint8_t* output,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows != 0);
  assert(rows <= 7);
  assert(channels != 0);

  // Row pointers i0..i6; every row index >= 'rows' is redirected to the
  // zero row, so it contributes nothing to the per-channel sums.
  const uint8_t* i0 = input;
  const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
  if XNN_UNPREDICTABLE(rows < 2) {
    i1 = zero;
  }
  const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 2) {
    i2 = zero;
  }
  const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
  if XNN_UNPREDICTABLE(rows < 4) {
    i3 = zero;
  }
  const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 4) {
    i4 = zero;
  }
  const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
  if XNN_UNPREDICTABLE(rows < 6) {
    i5 = zero;
  }
  const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 6) {
    i6 = zero;
  }
  // Broadcast the rndnu requantization constants into vector registers.
  const int32x4_t vinit_bias = vld1q_dup_s32(&params->rndnu_neon.init_bias);
  const int32x4_t vleft_pre_shift = vld1q_dup_s32(&params->rndnu_neon.left_pre_shift);
  const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
  const int32x4_t vleft_post_shift = vld1q_dup_s32(&params->rndnu_neon.left_post_shift);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
  const uint8x16_t voutput_min = vld1q_dup_u8(&params->rndnu_neon.output_min);
  const uint8x16_t voutput_max = vld1q_dup_u8(&params->rndnu_neon.output_max);
  // Main loop: 32 channels per iteration, held in four 8-lane groups
  // (01234567, 89ABCDEF, GHIJKLMN, OPQRSTUV). Loads of the next row are
  // interleaved with the widening adds of the previous one.
  for (; channels >= 32; channels -= 32) {
    const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
    const uint8x8_t vi0x89ABCDEF = vld1_u8(i0); i0 += 8;
    const uint8x8_t vi0xGHIJKLMN = vld1_u8(i0); i0 += 8;
    const uint8x8_t vi0xOPQRSTUV = vld1_u8(i0); i0 += 8;
    const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
    const uint8x8_t vi1x89ABCDEF = vld1_u8(i1); i1 += 8;
    const uint8x8_t vi1xGHIJKLMN = vld1_u8(i1); i1 += 8;
    const uint8x8_t vi1xOPQRSTUV = vld1_u8(i1); i1 += 8;
    const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
    // Accumulate the 7 rows into 16-bit sums: 7 * 255 = 1785 fits in uint16.
    uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
    const uint8x8_t vi2x89ABCDEF = vld1_u8(i2); i2 += 8;
    uint16x8_t vsum89ABCDEF = vaddl_u8(vi0x89ABCDEF, vi1x89ABCDEF);
    const uint8x8_t vi2xGHIJKLMN = vld1_u8(i2); i2 += 8;
    uint16x8_t vsumGHIJKLMN = vaddl_u8(vi0xGHIJKLMN, vi1xGHIJKLMN);
    const uint8x8_t vi2xOPQRSTUV = vld1_u8(i2); i2 += 8;
    uint16x8_t vsumOPQRSTUV = vaddl_u8(vi0xOPQRSTUV, vi1xOPQRSTUV);
    const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
    const uint8x8_t vi3x89ABCDEF = vld1_u8(i3); i3 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi2x89ABCDEF);
    const uint8x8_t vi3xGHIJKLMN = vld1_u8(i3); i3 += 8;
    vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi2xGHIJKLMN);
    const uint8x8_t vi3xOPQRSTUV = vld1_u8(i3); i3 += 8;
    vsumOPQRSTUV = vaddw_u8(vsumOPQRSTUV, vi2xOPQRSTUV);
    const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
    const uint8x8_t vi4x89ABCDEF = vld1_u8(i4); i4 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi3x89ABCDEF);
    const uint8x8_t vi4xGHIJKLMN = vld1_u8(i4); i4 += 8;
    vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi3xGHIJKLMN);
    const uint8x8_t vi4xOPQRSTUV = vld1_u8(i4); i4 += 8;
    vsumOPQRSTUV = vaddw_u8(vsumOPQRSTUV, vi3xOPQRSTUV);
    const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
    const uint8x8_t vi5x89ABCDEF = vld1_u8(i5); i5 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi4x89ABCDEF);
    const uint8x8_t vi5xGHIJKLMN = vld1_u8(i5); i5 += 8;
    vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi4xGHIJKLMN);
    const uint8x8_t vi5xOPQRSTUV = vld1_u8(i5); i5 += 8;
    vsumOPQRSTUV = vaddw_u8(vsumOPQRSTUV, vi4xOPQRSTUV);
    const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
    const uint8x8_t vi6x89ABCDEF = vld1_u8(i6); i6 += 8;
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi5x89ABCDEF);
    const uint8x8_t vi6xGHIJKLMN = vld1_u8(i6); i6 += 8;
    vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi5xGHIJKLMN);
    const uint8x8_t vi6xOPQRSTUV = vld1_u8(i6); i6 += 8;
    vsumOPQRSTUV = vaddw_u8(vsumOPQRSTUV, vi5xOPQRSTUV);
    vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
    vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi6x89ABCDEF);
    vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi6xGHIJKLMN);
    vsumOPQRSTUV = vaddw_u8(vsumOPQRSTUV, vi6xOPQRSTUV);
    // Add init_bias while widening the 16-bit sums to signed 32-bit
    // accumulators (the u32/s32 reinterpret casts make the mixed-sign
    // addition explicit without extra instructions).
    int32x4_t vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum01234567)));
    int32x4_t vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum01234567)));
    int32x4_t vacc89AB = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum89ABCDEF)));
    int32x4_t vaccCDEF = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum89ABCDEF)));
    int32x4_t vaccGHIJ = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsumGHIJKLMN)));
    int32x4_t vaccKLMN = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsumGHIJKLMN)));
    int32x4_t vaccOPQR = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsumOPQRSTUV)));
    int32x4_t vaccSTUV = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsumOPQRSTUV)));
    // rndnu requantization: saturating left pre-shift, doubling fixed-point
    // multiply, then rounding shift by left_post_shift (negative amounts in
    // vrshlq_s32 shift right with rounding).
    vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
    vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
    vacc89AB = vqshlq_s32(vacc89AB, vleft_pre_shift);
    vaccCDEF = vqshlq_s32(vaccCDEF, vleft_pre_shift);
    vaccGHIJ = vqshlq_s32(vaccGHIJ, vleft_pre_shift);
    vaccKLMN = vqshlq_s32(vaccKLMN, vleft_pre_shift);
    vaccOPQR = vqshlq_s32(vaccOPQR, vleft_pre_shift);
    vaccSTUV = vqshlq_s32(vaccSTUV, vleft_pre_shift);
    vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
    vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
    vacc89AB = vqdmulhq_s32(vacc89AB, vmultiplier);
    vaccCDEF = vqdmulhq_s32(vaccCDEF, vmultiplier);
    vaccGHIJ = vqdmulhq_s32(vaccGHIJ, vmultiplier);
    vaccKLMN = vqdmulhq_s32(vaccKLMN, vmultiplier);
    vaccOPQR = vqdmulhq_s32(vaccOPQR, vmultiplier);
    vaccSTUV = vqdmulhq_s32(vaccSTUV, vmultiplier);
    vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
    vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
    vacc89AB = vrshlq_s32(vacc89AB, vleft_post_shift);
    vaccCDEF = vrshlq_s32(vaccCDEF, vleft_post_shift);
    vaccGHIJ = vrshlq_s32(vaccGHIJ, vleft_post_shift);
    vaccKLMN = vrshlq_s32(vaccKLMN, vleft_post_shift);
    vaccOPQR = vrshlq_s32(vaccOPQR, vleft_post_shift);
    vaccSTUV = vrshlq_s32(vaccSTUV, vleft_post_shift);
    // Saturating-narrow to int16; AArch64 has a fused narrow-high form.
#if XNN_ARCH_ARM64
    int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
    int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
    int16x8_t vaccGHIJKLMN = vqmovn_high_s32(vqmovn_s32(vaccGHIJ), vaccKLMN);
    int16x8_t vaccOPQRSTUV = vqmovn_high_s32(vqmovn_s32(vaccOPQR), vaccSTUV);
#else  // !XNN_ARCH_ARM64
    int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
    int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
    int16x8_t vaccGHIJKLMN = vcombine_s16(vqmovn_s32(vaccGHIJ), vqmovn_s32(vaccKLMN));
    int16x8_t vaccOPQRSTUV = vcombine_s16(vqmovn_s32(vaccOPQR), vqmovn_s32(vaccSTUV));
#endif  // !XNN_ARCH_ARM64
    // Re-center on the output zero point (saturating), then narrow to uint8.
    vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
    vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
    vaccGHIJKLMN = vqaddq_s16(vaccGHIJKLMN, voutput_zero_point);
    vaccOPQRSTUV = vqaddq_s16(vaccOPQRSTUV, voutput_zero_point);
#if XNN_ARCH_ARM64
    uint8x16_t vout0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc01234567), vacc89ABCDEF);
    uint8x16_t voutGHIJKLMNOPQRSTUV = vqmovun_high_s16(vqmovun_s16(vaccGHIJKLMN), vaccOPQRSTUV);
#else  // !XNN_ARCH_ARM64
    uint8x16_t vout0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc01234567), vqmovun_s16(vacc89ABCDEF));
    uint8x16_t voutGHIJKLMNOPQRSTUV = vcombine_u8(vqmovun_s16(vaccGHIJKLMN), vqmovun_s16(vaccOPQRSTUV));
#endif  // !XNN_ARCH_ARM64
    // Clamp to the [output_min, output_max] activation range and store.
    vout0123456789ABCDEF = vmaxq_u8(vout0123456789ABCDEF, voutput_min);
    voutGHIJKLMNOPQRSTUV = vmaxq_u8(voutGHIJKLMNOPQRSTUV, voutput_min);
    vout0123456789ABCDEF = vminq_u8(vout0123456789ABCDEF, voutput_max);
    voutGHIJKLMNOPQRSTUV = vminq_u8(voutGHIJKLMNOPQRSTUV, voutput_max);
    vst1q_u8(output, vout0123456789ABCDEF); output += 16;
    vst1q_u8(output, voutGHIJKLMNOPQRSTUV); output += 16;
  }
  // Remainder: handle the last 1..31 channels in groups of 8. Full 8-byte
  // loads may read past the row end; that is allowed per XNN_OOB_READS.
  if XNN_UNLIKELY(channels != 0) {
    do {
      const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
      const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
      const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
      uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
      const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
      const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
      const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
      const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
      vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
      int32x4_t vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum01234567)));
      int32x4_t vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum01234567)));
      vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
      vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
      vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
      vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
      vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
      vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
#if XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
      int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
      vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
      uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
      vout01234567 = vmax_u8(vout01234567, vget_low_u8(voutput_min));
      vout01234567 = vmin_u8(vout01234567, vget_low_u8(voutput_max));
      if XNN_LIKELY(channels >= 8) {
        vst1_u8(output, vout01234567); output += 8;
        channels -= 8;
      } else {
        // Partial store of the final 1..7 outputs: 4-, 2-, then 1-byte
        // pieces, rotating the vector between stores with vext.
        if (channels & 4) {
          vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
          vout01234567 = vext_u8(vout01234567, vout01234567, 4);
        }
        if (channels & 2) {
          vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
          vout01234567 = vext_u8(vout01234567, vout01234567, 2);
        }
        if (channels & 1) {
          vst1_lane_u8(output, vout01234567, 0); output += 1;
        }
        channels = 0;
      }
    } while (channels != 0);
  }
}
| 11,929
| 46.911647
| 124
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gavgpool/gen/qu8-gavgpool-7x-minmax-rndnu-neon-c8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gavgpool.h>
// Unipass QU8 global average pooling microkernel (NEON, 8 channels per
// tile, rndnu requantization): sums up to 7 input rows per channel and
// requantizes the accumulated sum back to uint8 in a single pass.
//
// rows         - number of valid input rows, 1..7 (asserted below)
// channels     - number of channels to pool (elements per row)
// input        - pointer to the first input row
// input_stride - byte stride between consecutive input rows
// zero         - pointer to a zero-filled row; substituted for the row
//                pointers beyond 'rows' so 7 rows are always read
// output       - destination buffer, one uint8 per channel
// params       - requantization constants (rndnu_neon variant)
void xnn_qu8_gavgpool_minmax_rndnu_ukernel_7x__neon_c8(
    size_t rows,
    size_t channels,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    uint8_t* output,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows != 0);
  assert(rows <= 7);
  assert(channels != 0);

  // Row pointers i0..i6; every row index >= 'rows' is redirected to the
  // zero row, so it contributes nothing to the per-channel sums.
  const uint8_t* i0 = input;
  const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
  if XNN_UNPREDICTABLE(rows < 2) {
    i1 = zero;
  }
  const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 2) {
    i2 = zero;
  }
  const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
  if XNN_UNPREDICTABLE(rows < 4) {
    i3 = zero;
  }
  const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 4) {
    i4 = zero;
  }
  const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
  if XNN_UNPREDICTABLE(rows < 6) {
    i5 = zero;
  }
  const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 6) {
    i6 = zero;
  }
  // Broadcast the rndnu requantization constants into vector registers.
  const int32x4_t vinit_bias = vld1q_dup_s32(&params->rndnu_neon.init_bias);
  const int32x4_t vleft_pre_shift = vld1q_dup_s32(&params->rndnu_neon.left_pre_shift);
  const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
  const int32x4_t vleft_post_shift = vld1q_dup_s32(&params->rndnu_neon.left_post_shift);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
  const uint8x8_t voutput_min = vld1_dup_u8(&params->rndnu_neon.output_min);
  const uint8x8_t voutput_max = vld1_dup_u8(&params->rndnu_neon.output_max);
  // Main loop: 8 channels per iteration.
  for (; channels >= 8; channels -= 8) {
    const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
    const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
    const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
    // Accumulate the 7 rows into 16-bit sums: 7 * 255 = 1785 fits in uint16.
    uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
    const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
    const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
    const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
    const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
    vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
    // Add init_bias while widening the 16-bit sums to signed 32-bit
    // accumulators.
    vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
    int32x4_t vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum01234567)));
    int32x4_t vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum01234567)));
    // rndnu requantization: saturating left pre-shift, doubling fixed-point
    // multiply, rounding right shift (vrshlq_s32 with a negative amount).
    vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
    vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
    vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
    vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
    vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
    vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
    // Saturating-narrow to int16; AArch64 has a fused narrow-high form.
#if XNN_ARCH_ARM64
    int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else  // !XNN_ARCH_ARM64
    int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif  // !XNN_ARCH_ARM64
    vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
    // Note: vqmovun_s16 is identical on AArch32 and AArch64; the generated
    // code previously wrapped this single statement in a redundant
    // #if XNN_ARCH_ARM64 whose two branches were byte-identical.
    uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
    // Clamp to the [output_min, output_max] activation range and store.
    vout01234567 = vmax_u8(vout01234567, voutput_min);
    vout01234567 = vmin_u8(vout01234567, voutput_max);
    vst1_u8(output, vout01234567); output += 8;
  }
  // Remainder: handle the last 1..7 channels. The full 8-byte loads may
  // read past the row end; that is allowed per XNN_OOB_READS.
  if XNN_UNLIKELY(channels != 0) {
    {
      const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
      const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
      const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
      uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
      const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
      const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
      const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
      const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
      vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
      int32x4_t vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum01234567)));
      int32x4_t vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum01234567)));
      vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
      vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
      vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
      vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
      vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
      vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
#if XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
      int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
      vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
      uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
      vout01234567 = vmax_u8(vout01234567, voutput_min);
      vout01234567 = vmin_u8(vout01234567, voutput_max);
      // Partial store of the final 1..7 outputs: 4-, 2-, then 1-byte
      // pieces, rotating the vector between stores with vext.
      if (channels & 4) {
        vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
        vout01234567 = vext_u8(vout01234567, vout01234567, 4);
      }
      if (channels & 2) {
        vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
        vout01234567 = vext_u8(vout01234567, vout01234567, 2);
      }
      if (channels & 1) {
        vst1_lane_u8(output, vout01234567, 0);
      }
    }
  }
}
| 6,477
| 38.024096
| 124
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gemm/gen/qu8-gemm-1x16-minmax-fp32-neon-mlal-lane.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/neon-mlal-lane.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/gemm.h>
void xnn_qu8_gemm_minmax_fp32_ukernel_1x16__neon_mlal_lane(
size_t mr,
size_t nc,
size_t kc,
const uint8_t* restrict a,
size_t a_stride,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const uint8_t* a0 = a;
uint8_t* c0 = c;
const uint8x8_t vb_zero_point = vld1_dup_u8(¶ms->fp32_neon.kernel_zero_point[0]);
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
while (k >= 8 * sizeof(uint8_t)) {
const uint8x8_t va0 = vld1_u8(a0); a0 += 8;
const int16x8_t vxa0 = vreinterpretq_s16_u16(vmovl_u8(va0));
const uint8x8_t vb01234567c0 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c0 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c0, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
const uint8x8_t vb89ABCDEFc0 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb89ABCDEFc0 = vreinterpretq_s16_u16(vsubl_u8(vb89ABCDEFc0, vb_zero_point));
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
const uint8x8_t vb01234567c1 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c1 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c1, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
const uint8x8_t vb89ABCDEFc1 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb89ABCDEFc1 = vreinterpretq_s16_u16(vsubl_u8(vb89ABCDEFc1, vb_zero_point));
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
const uint8x8_t vb01234567c2 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c2 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c2, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
const uint8x8_t vb89ABCDEFc2 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb89ABCDEFc2 = vreinterpretq_s16_u16(vsubl_u8(vb89ABCDEFc2, vb_zero_point));
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
const uint8x8_t vb01234567c3 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c3 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c3, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
const uint8x8_t vb89ABCDEFc3 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb89ABCDEFc3 = vreinterpretq_s16_u16(vsubl_u8(vb89ABCDEFc3, vb_zero_point));
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
const uint8x8_t vb01234567c4 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c4 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c4, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
const uint8x8_t vb89ABCDEFc4 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb89ABCDEFc4 = vreinterpretq_s16_u16(vsubl_u8(vb89ABCDEFc4, vb_zero_point));
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
const uint8x8_t vb01234567c5 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c5 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c5, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
const uint8x8_t vb89ABCDEFc5 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb89ABCDEFc5 = vreinterpretq_s16_u16(vsubl_u8(vb89ABCDEFc5, vb_zero_point));
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
const uint8x8_t vb01234567c6 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c6 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c6, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
const uint8x8_t vb89ABCDEFc6 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb89ABCDEFc6 = vreinterpretq_s16_u16(vsubl_u8(vb89ABCDEFc6, vb_zero_point));
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
const uint8x8_t vb01234567c7 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c7 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c7, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
const uint8x8_t vb89ABCDEFc7 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb89ABCDEFc7 = vreinterpretq_s16_u16(vsubl_u8(vb89ABCDEFc7, vb_zero_point));
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc7), vget_high_s16(vxa0), 3);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc7), vget_high_s16(vxa0), 3);
k -= 8 * sizeof(uint8_t);
}
if XNN_UNLIKELY(k != 0) {
const uint8x8_t va0 = vld1_u8(a0); a0 = (const uint8_t*) ((uintptr_t) a0 + k);
const int16x8_t vxa0 = vreinterpretq_s16_u16(vmovl_u8(va0));
const uint8x8_t vb01234567c0 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c0 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c0, vb_zero_point));
const uint8x8_t vb89ABCDEFc0 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb89ABCDEFc0 = vreinterpretq_s16_u16(vsubl_u8(vb89ABCDEFc0, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
if (k >= 2 * sizeof(uint8_t)) {
const uint8x8_t vb01234567c1 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c1 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c1, vb_zero_point));
const uint8x8_t vb89ABCDEFc1 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb89ABCDEFc1 = vreinterpretq_s16_u16(vsubl_u8(vb89ABCDEFc1, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
if (k > 2 * sizeof(uint8_t)) {
const uint8x8_t vb01234567c2 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c2 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c2, vb_zero_point));
const uint8x8_t vb89ABCDEFc2 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb89ABCDEFc2 = vreinterpretq_s16_u16(vsubl_u8(vb89ABCDEFc2, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
if (k >= 4 * sizeof(uint8_t)) {
const uint8x8_t vb01234567c3 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c3 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c3, vb_zero_point));
const uint8x8_t vb89ABCDEFc3 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb89ABCDEFc3 = vreinterpretq_s16_u16(vsubl_u8(vb89ABCDEFc3, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
if (k > 4 * sizeof(uint8_t)) {
const uint8x8_t vb01234567c4 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c4 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c4, vb_zero_point));
const uint8x8_t vb89ABCDEFc4 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb89ABCDEFc4 = vreinterpretq_s16_u16(vsubl_u8(vb89ABCDEFc4, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
if (k >= 6 * sizeof(uint8_t)) {
const uint8x8_t vb01234567c5 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c5 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c5, vb_zero_point));
const uint8x8_t vb89ABCDEFc5 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb89ABCDEFc5 = vreinterpretq_s16_u16(vsubl_u8(vb89ABCDEFc5, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
if (k > 6 * sizeof(uint8_t)) {
const uint8x8_t vb01234567c6 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c6 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c6, vb_zero_point));
const uint8x8_t vb89ABCDEFc6 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb89ABCDEFc6 = vreinterpretq_s16_u16(vsubl_u8(vb89ABCDEFc6, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
}
}
}
}
}
}
}
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
float32x4_t vfpacc0x89AB = vcvtq_f32_s32(vacc0x89AB);
float32x4_t vfpacc0xCDEF = vcvtq_f32_s32(vacc0xCDEF);
const float32x4_t vscale = vld1q_dup_f32(¶ms->fp32_neon.scale);
vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
vfpacc0x89AB = vmulq_f32(vfpacc0x89AB, vscale);
vfpacc0xCDEF = vmulq_f32(vfpacc0xCDEF, vscale);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->fp32_neon.magic_bias);
vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
vacc0x89AB = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x89AB, vmagic_bias));
vacc0xCDEF = vreinterpretq_s32_f32(vaddq_f32(vfpacc0xCDEF, vmagic_bias));
const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(¶ms->fp32_neon.magic_bias_less_output_zero_point);
vacc0x0123 = vqsubq_s32(vacc0x0123, vmagic_bias_less_output_zero_point);
vacc0x4567 = vqsubq_s32(vacc0x4567, vmagic_bias_less_output_zero_point);
vacc0x89AB = vqsubq_s32(vacc0x89AB, vmagic_bias_less_output_zero_point);
vacc0xCDEF = vqsubq_s32(vacc0xCDEF, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
uint8x16_t vout0x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
uint8x16_t vout0x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc0x01234567), vqmovun_s16(vacc0x89ABCDEF));
#endif
const uint8x16_t voutput_min = vld1q_dup_u8(¶ms->fp32_neon.output_min);
vout0x0123456789ABCDEF = vmaxq_u8(vout0x0123456789ABCDEF, voutput_min);
const uint8x16_t voutput_max = vld1q_dup_u8(¶ms->fp32_neon.output_max);
vout0x0123456789ABCDEF = vminq_u8(vout0x0123456789ABCDEF, voutput_max);
if (nc >= 16) {
vst1q_u8(c0 + 0, vout0x0123456789ABCDEF);
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
uint8x8_t vout0x01234567 = vget_low_u8(vout0x0123456789ABCDEF);
if (nc & 8) {
vst1_u8(c0, vout0x01234567); c0 += 8;
vout0x01234567 = vget_high_u8(vout0x0123456789ABCDEF);
}
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_u8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_u8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_u8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_u8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_u8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 17,435
| 57.314381
| 125
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gemm/gen/qu8-gemm-1x16-minmax-fp32-neonv8-mlal-lane.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/neon-mlal-lane.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_qu8_gemm_minmax_fp32_ukernel_1x16__neonv8_mlal_lane(
size_t mr,
size_t nc,
size_t kc,
const uint8_t* restrict a,
size_t a_stride,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const uint8_t* a0 = a;
uint8_t* c0 = c;
const uint8x8_t vb_zero_point = vld1_dup_u8(¶ms->fp32_neonv8.kernel_zero_point[0]);
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
while (k >= 8 * sizeof(uint8_t)) {
const uint8x8_t va0 = vld1_u8(a0); a0 += 8;
const int16x8_t vxa0 = vreinterpretq_s16_u16(vmovl_u8(va0));
const uint8x8_t vb01234567c0 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c0 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c0, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
const uint8x8_t vb89ABCDEFc0 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb89ABCDEFc0 = vreinterpretq_s16_u16(vsubl_u8(vb89ABCDEFc0, vb_zero_point));
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
const uint8x8_t vb01234567c1 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c1 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c1, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
const uint8x8_t vb89ABCDEFc1 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb89ABCDEFc1 = vreinterpretq_s16_u16(vsubl_u8(vb89ABCDEFc1, vb_zero_point));
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
const uint8x8_t vb01234567c2 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c2 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c2, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
const uint8x8_t vb89ABCDEFc2 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb89ABCDEFc2 = vreinterpretq_s16_u16(vsubl_u8(vb89ABCDEFc2, vb_zero_point));
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
const uint8x8_t vb01234567c3 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c3 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c3, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
const uint8x8_t vb89ABCDEFc3 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb89ABCDEFc3 = vreinterpretq_s16_u16(vsubl_u8(vb89ABCDEFc3, vb_zero_point));
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
const uint8x8_t vb01234567c4 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c4 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c4, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
const uint8x8_t vb89ABCDEFc4 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb89ABCDEFc4 = vreinterpretq_s16_u16(vsubl_u8(vb89ABCDEFc4, vb_zero_point));
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
const uint8x8_t vb01234567c5 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c5 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c5, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
const uint8x8_t vb89ABCDEFc5 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb89ABCDEFc5 = vreinterpretq_s16_u16(vsubl_u8(vb89ABCDEFc5, vb_zero_point));
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
const uint8x8_t vb01234567c6 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c6 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c6, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
const uint8x8_t vb89ABCDEFc6 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb89ABCDEFc6 = vreinterpretq_s16_u16(vsubl_u8(vb89ABCDEFc6, vb_zero_point));
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
const uint8x8_t vb01234567c7 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c7 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c7, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
const uint8x8_t vb89ABCDEFc7 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb89ABCDEFc7 = vreinterpretq_s16_u16(vsubl_u8(vb89ABCDEFc7, vb_zero_point));
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc7), vget_high_s16(vxa0), 3);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc7), vget_high_s16(vxa0), 3);
k -= 8 * sizeof(uint8_t);
}
if XNN_UNLIKELY(k != 0) {
const uint8x8_t va0 = vld1_u8(a0); a0 = (const uint8_t*) ((uintptr_t) a0 + k);
const int16x8_t vxa0 = vreinterpretq_s16_u16(vmovl_u8(va0));
const uint8x8_t vb01234567c0 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c0 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c0, vb_zero_point));
const uint8x8_t vb89ABCDEFc0 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb89ABCDEFc0 = vreinterpretq_s16_u16(vsubl_u8(vb89ABCDEFc0, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
if (k >= 2 * sizeof(uint8_t)) {
const uint8x8_t vb01234567c1 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c1 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c1, vb_zero_point));
const uint8x8_t vb89ABCDEFc1 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb89ABCDEFc1 = vreinterpretq_s16_u16(vsubl_u8(vb89ABCDEFc1, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
if (k > 2 * sizeof(uint8_t)) {
const uint8x8_t vb01234567c2 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c2 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c2, vb_zero_point));
const uint8x8_t vb89ABCDEFc2 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb89ABCDEFc2 = vreinterpretq_s16_u16(vsubl_u8(vb89ABCDEFc2, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
if (k >= 4 * sizeof(uint8_t)) {
const uint8x8_t vb01234567c3 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c3 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c3, vb_zero_point));
const uint8x8_t vb89ABCDEFc3 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb89ABCDEFc3 = vreinterpretq_s16_u16(vsubl_u8(vb89ABCDEFc3, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
if (k > 4 * sizeof(uint8_t)) {
const uint8x8_t vb01234567c4 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c4 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c4, vb_zero_point));
const uint8x8_t vb89ABCDEFc4 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb89ABCDEFc4 = vreinterpretq_s16_u16(vsubl_u8(vb89ABCDEFc4, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
if (k >= 6 * sizeof(uint8_t)) {
const uint8x8_t vb01234567c5 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c5 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c5, vb_zero_point));
const uint8x8_t vb89ABCDEFc5 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb89ABCDEFc5 = vreinterpretq_s16_u16(vsubl_u8(vb89ABCDEFc5, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
if (k > 6 * sizeof(uint8_t)) {
const uint8x8_t vb01234567c6 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c6 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c6, vb_zero_point));
const uint8x8_t vb89ABCDEFc6 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb89ABCDEFc6 = vreinterpretq_s16_u16(vsubl_u8(vb89ABCDEFc6, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
}
}
}
}
}
}
}
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
float32x4_t vfpacc0x89AB = vcvtq_f32_s32(vacc0x89AB);
float32x4_t vfpacc0xCDEF = vcvtq_f32_s32(vacc0xCDEF);
const float32x4_t vscale = vld1q_dup_f32(¶ms->fp32_neonv8.scale);
vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
vfpacc0x89AB = vmulq_f32(vfpacc0x89AB, vscale);
vfpacc0xCDEF = vmulq_f32(vfpacc0xCDEF, vscale);
vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
vacc0x89AB = vcvtnq_s32_f32(vfpacc0x89AB);
vacc0xCDEF = vcvtnq_s32_f32(vfpacc0xCDEF);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->fp32_neonv8.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
uint8x16_t vout0x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
uint8x16_t vout0x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc0x01234567), vqmovun_s16(vacc0x89ABCDEF));
#endif
const uint8x16_t voutput_min = vld1q_dup_u8(¶ms->fp32_neonv8.output_min);
vout0x0123456789ABCDEF = vmaxq_u8(vout0x0123456789ABCDEF, voutput_min);
const uint8x16_t voutput_max = vld1q_dup_u8(¶ms->fp32_neonv8.output_max);
vout0x0123456789ABCDEF = vminq_u8(vout0x0123456789ABCDEF, voutput_max);
if (nc >= 16) {
vst1q_u8(c0 + 0, vout0x0123456789ABCDEF);
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
uint8x8_t vout0x01234567 = vget_low_u8(vout0x0123456789ABCDEF);
if (nc & 8) {
vst1_u8(c0, vout0x01234567); c0 += 8;
vout0x01234567 = vget_high_u8(vout0x0123456789ABCDEF);
}
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_u8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_u8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_u8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_u8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_u8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 17,217
| 56.778523
| 112
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gemm/gen/qu8-gemm-1x16-minmax-rndnu-neon-mlal-lane.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/neon-mlal-lane.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/gemm.h>
void xnn_qu8_gemm_minmax_rndnu_ukernel_1x16__neon_mlal_lane(
size_t mr,
size_t nc,
size_t kc,
const uint8_t* restrict a,
size_t a_stride,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const uint8_t* a0 = a;
uint8_t* c0 = c;
const uint8x8_t vb_zero_point = vld1_dup_u8(¶ms->rndnu_neon.kernel_zero_point[0]);
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
while (k >= 8 * sizeof(uint8_t)) {
const uint8x8_t va0 = vld1_u8(a0); a0 += 8;
const int16x8_t vxa0 = vreinterpretq_s16_u16(vmovl_u8(va0));
const uint8x8_t vb01234567c0 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c0 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c0, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
const uint8x8_t vb89ABCDEFc0 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb89ABCDEFc0 = vreinterpretq_s16_u16(vsubl_u8(vb89ABCDEFc0, vb_zero_point));
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
const uint8x8_t vb01234567c1 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c1 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c1, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
const uint8x8_t vb89ABCDEFc1 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb89ABCDEFc1 = vreinterpretq_s16_u16(vsubl_u8(vb89ABCDEFc1, vb_zero_point));
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
const uint8x8_t vb01234567c2 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c2 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c2, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
const uint8x8_t vb89ABCDEFc2 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb89ABCDEFc2 = vreinterpretq_s16_u16(vsubl_u8(vb89ABCDEFc2, vb_zero_point));
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
const uint8x8_t vb01234567c3 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c3 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c3, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
const uint8x8_t vb89ABCDEFc3 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb89ABCDEFc3 = vreinterpretq_s16_u16(vsubl_u8(vb89ABCDEFc3, vb_zero_point));
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
const uint8x8_t vb01234567c4 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c4 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c4, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
const uint8x8_t vb89ABCDEFc4 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb89ABCDEFc4 = vreinterpretq_s16_u16(vsubl_u8(vb89ABCDEFc4, vb_zero_point));
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
const uint8x8_t vb01234567c5 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c5 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c5, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
const uint8x8_t vb89ABCDEFc5 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb89ABCDEFc5 = vreinterpretq_s16_u16(vsubl_u8(vb89ABCDEFc5, vb_zero_point));
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
const uint8x8_t vb01234567c6 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c6 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c6, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
const uint8x8_t vb89ABCDEFc6 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb89ABCDEFc6 = vreinterpretq_s16_u16(vsubl_u8(vb89ABCDEFc6, vb_zero_point));
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
const uint8x8_t vb01234567c7 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c7 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c7, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
const uint8x8_t vb89ABCDEFc7 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb89ABCDEFc7 = vreinterpretq_s16_u16(vsubl_u8(vb89ABCDEFc7, vb_zero_point));
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc7), vget_high_s16(vxa0), 3);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc7), vget_high_s16(vxa0), 3);
k -= 8 * sizeof(uint8_t);
}
if XNN_UNLIKELY(k != 0) {
const uint8x8_t va0 = vld1_u8(a0); a0 = (const uint8_t*) ((uintptr_t) a0 + k);
const int16x8_t vxa0 = vreinterpretq_s16_u16(vmovl_u8(va0));
const uint8x8_t vb01234567c0 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c0 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c0, vb_zero_point));
const uint8x8_t vb89ABCDEFc0 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb89ABCDEFc0 = vreinterpretq_s16_u16(vsubl_u8(vb89ABCDEFc0, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
if (k >= 2 * sizeof(uint8_t)) {
const uint8x8_t vb01234567c1 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c1 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c1, vb_zero_point));
const uint8x8_t vb89ABCDEFc1 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb89ABCDEFc1 = vreinterpretq_s16_u16(vsubl_u8(vb89ABCDEFc1, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
if (k > 2 * sizeof(uint8_t)) {
const uint8x8_t vb01234567c2 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c2 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c2, vb_zero_point));
const uint8x8_t vb89ABCDEFc2 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb89ABCDEFc2 = vreinterpretq_s16_u16(vsubl_u8(vb89ABCDEFc2, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
if (k >= 4 * sizeof(uint8_t)) {
const uint8x8_t vb01234567c3 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c3 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c3, vb_zero_point));
const uint8x8_t vb89ABCDEFc3 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb89ABCDEFc3 = vreinterpretq_s16_u16(vsubl_u8(vb89ABCDEFc3, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
if (k > 4 * sizeof(uint8_t)) {
const uint8x8_t vb01234567c4 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c4 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c4, vb_zero_point));
const uint8x8_t vb89ABCDEFc4 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb89ABCDEFc4 = vreinterpretq_s16_u16(vsubl_u8(vb89ABCDEFc4, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
if (k >= 6 * sizeof(uint8_t)) {
const uint8x8_t vb01234567c5 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c5 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c5, vb_zero_point));
const uint8x8_t vb89ABCDEFc5 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb89ABCDEFc5 = vreinterpretq_s16_u16(vsubl_u8(vb89ABCDEFc5, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
if (k > 6 * sizeof(uint8_t)) {
const uint8x8_t vb01234567c6 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c6 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c6, vb_zero_point));
const uint8x8_t vb89ABCDEFc6 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb89ABCDEFc6 = vreinterpretq_s16_u16(vsubl_u8(vb89ABCDEFc6, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
}
}
}
}
}
}
}
const int32x4_t vright_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
uint8x16_t vout0x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
uint8x16_t vout0x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc0x01234567), vqmovun_s16(vacc0x89ABCDEF));
#endif
const uint8x16_t voutput_min = vld1q_dup_u8(¶ms->rndnu_neon.output_min);
vout0x0123456789ABCDEF = vmaxq_u8(vout0x0123456789ABCDEF, voutput_min);
const uint8x16_t voutput_max = vld1q_dup_u8(¶ms->rndnu_neon.output_max);
vout0x0123456789ABCDEF = vminq_u8(vout0x0123456789ABCDEF, voutput_max);
if (nc >= 16) {
vst1q_u8(c0 + 0, vout0x0123456789ABCDEF);
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
uint8x8_t vout0x01234567 = vget_low_u8(vout0x0123456789ABCDEF);
if (nc & 8) {
vst1_u8(c0, vout0x01234567); c0 += 8;
vout0x01234567 = vget_high_u8(vout0x0123456789ABCDEF);
}
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_u8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_u8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_u8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_u8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_u8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 17,435
| 57.12
| 112
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gemm/gen/qu8-gemm-1x16c4-minmax-fp32-neondot.c
|
// Auto-generated file. Do not edit!
// Template: src/qu8-gemm/c4-neondot.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
// QU8 GEMM microkernel computing a 1x16 output tile with NEON u8 dot-product
// (vdot) intrinsics and fp32 requantization.
//
// A is 1 x kc (uint8), W is pre-packed: 16 int32 biases followed by weights
// interleaved in groups of 4 columns x 4 k-positions. Because the weights are
// unsigned, the product is accumulated as-is ("positive" accumulators vpacc*)
// and the activation-sum * kernel_zero_point correction (vnacc0) is
// subtracted at the end.
void xnn_qu8_gemm_minmax_fp32_ukernel_1x16c4__neondot(
    size_t mr,
    size_t nc,
    size_t kc,
    const uint8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    uint8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(uint8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // The packed weights are padded so K can be consumed in groups of 4 bytes.
  kc = round_up_po2(kc, 4 * sizeof(uint8_t));
  const uint8_t* a0 = a;
  uint8_t* c0 = c;

  // Broadcast kernel zero point; dotted against activations to build the
  // zero-point correction term accumulated in vnacc0.
  const uint8x8_t va_zero_point = vld1_dup_u8(&params->fp32_neonv8.kernel_zero_point[0]);

  // Loop over groups of 16 columns.
  do {
    // Initialize accumulators with bias. 16 bias values are loaded from the
    // weight matrix, at the start of the group of 16 columns.
    uint32x4_t vpacc0x0123 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
    uint32x4_t vpacc0x4567 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
    uint32x4_t vpacc0x89AB = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
    uint32x4_t vpacc0xCDEF = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
    uint32x2_t vnacc0 = vmov_n_u32(0);

    // Inner accumulation loop along the 16 columns.
    size_t k = kc;
    // 2x partial unrolled loop to load 8 bytes at a time.
    while (k >= 8 * sizeof(uint8_t)) {
      // Load a 1x8 block of activations.
      const uint8x8_t va0x01234567 = vld1_u8(a0); a0 += 8;

      // Load a 8x16 block of weights.
      const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb0123x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb0123xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb4567x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb4567x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb4567x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb4567xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);

      // Multiply-accumulate: 1x8 * 8x16 --> 1x16.
      vnacc0 = vdot_u32(vnacc0, va_zero_point, va0x01234567);
      vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
      vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
      vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb0123x89AB, va0x01234567, 0);
      vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
      vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb4567x0123, va0x01234567, 1);
      vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb4567x4567, va0x01234567, 1);
      vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb4567x89AB, va0x01234567, 1);
      vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb4567xCDEF, va0x01234567, 1);

      k -= 8 * sizeof(uint8_t);
    }
    // Handle up to 4 final positions of `k`
    if XNN_UNLIKELY(k != 0) {
      // Load a 1x4 block of activations (upper lane zeroed).
      const uint8x8_t va0x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a0, vmov_n_u32(0), 0)); a0 += 4;

      // Load a 4x16 block of weights.
      const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb0123x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb0123xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);

      // Multiply-accumulate: 1x4 * 4x16 --> 1x16.
      vnacc0 = vdot_u32(vnacc0, va_zero_point, va0x01234567);
      vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
      vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
      vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb0123x89AB, va0x01234567, 0);
      vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
    }

    // Subtract zero point from accumulators: reduce vnacc0 to a scalar sum,
    // broadcast it, and subtract from every column's accumulator.
    vnacc0 = vpadd_u32(vnacc0, vnacc0);
    const uint32x4_t vnacc0x0123 = vcombine_u32(vnacc0, vnacc0);
    int32x4_t vacc0x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x0123, vnacc0x0123));
    int32x4_t vacc0x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x4567, vnacc0x0123));
    int32x4_t vacc0x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc0x89AB, vnacc0x0123));
    int32x4_t vacc0xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc0xCDEF, vnacc0x0123));

    // fp32 requantization: convert to float, scale, round-to-nearest back
    // to int32 (vcvtnq), then narrow with saturation and add the output
    // zero point.
    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
    float32x4_t vfpacc0x89AB = vcvtq_f32_s32(vacc0x89AB);
    float32x4_t vfpacc0xCDEF = vcvtq_f32_s32(vacc0xCDEF);

    const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
    vfpacc0x89AB = vmulq_f32(vfpacc0x89AB, vscale);
    vfpacc0xCDEF = vmulq_f32(vfpacc0xCDEF, vscale);

    vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
    vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
    vacc0x89AB = vcvtnq_s32_f32(vfpacc0x89AB);
    vacc0xCDEF = vcvtnq_s32_f32(vfpacc0xCDEF);

    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
#if XNN_ARCH_ARM64
    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);

    uint8x16_t vout0x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);

    uint8x16_t vout0x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc0x01234567), vqmovun_s16(vacc0x89ABCDEF));
#endif
    // Clamp to the requested output range.
    const uint8x16_t voutput_min = vld1q_dup_u8(&params->fp32_neonv8.output_min);
    const uint8x16_t voutput_max = vld1q_dup_u8(&params->fp32_neonv8.output_max);

    vout0x0123456789ABCDEF = vmaxq_u8(vout0x0123456789ABCDEF, voutput_min);

    vout0x0123456789ABCDEF = vminq_u8(vout0x0123456789ABCDEF, voutput_max);

    if (nc >= 16) {
      // Full 16-column store; rewind A for the next column group.
      vst1q_u8(c0 + 0, vout0x0123456789ABCDEF);

      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);

      a0 = (const uint8_t*) ((uintptr_t) a0 - kc);

      nc -= 16;
    } else {
      // Partial store of the final 1..15 columns, in 8/4/2/1 chunks.
      uint8x8_t vout0x01234567 = vget_low_u8(vout0x0123456789ABCDEF);
      if (nc & 8) {
        vst1_u8(c0, vout0x01234567); c0 += 8;
        vout0x01234567 = vget_high_u8(vout0x0123456789ABCDEF);
      }
      if (nc & 4) {
        vst1_lane_u32((void*) c0, vreinterpret_u32_u8(vout0x01234567), 0); c0 += 4;
        vout0x01234567 = vext_u8(vout0x01234567, vout0x01234567, 4);
      }
      if (nc & 2) {
        vst1_lane_u16((void*) c0, vreinterpret_u16_u8(vout0x01234567), 0); c0 += 2;
        vout0x01234567 = vext_u8(vout0x01234567, vout0x01234567, 2);
      }
      if (nc & 1) {
        vst1_lane_u8(c0, vout0x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
| 7,908
| 43.432584
| 130
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gemm/gen/qu8-gemm-1x16c4-minmax-rndnu-neondot.c
|
// Auto-generated file. Do not edit!
// Template: src/qu8-gemm/c4-neondot.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
// QU8 GEMM microkernel computing a 1x16 output tile with NEON u8 dot-product
// (vdot) intrinsics and rndnu (rounding-to-nearest-up) integer requantization.
//
// A is 1 x kc (uint8), W is pre-packed: 16 int32 biases followed by weights
// interleaved in groups of 4 columns x 4 k-positions. Unsigned products are
// accumulated directly (vpacc*), and the activation-sum * kernel_zero_point
// correction (vnacc0) is subtracted before requantization.
//
// Fix: the rndnu pre-shift must use the saturating shift vqshlq_s32 (as in
// every other rndnu requantization path, e.g. the gavgpool rndnu kernels);
// the non-saturating vshlq_s32 previously used here loses the saturation
// guarantee of the rndnu scheme.
void xnn_qu8_gemm_minmax_rndnu_ukernel_1x16c4__neondot(
    size_t mr,
    size_t nc,
    size_t kc,
    const uint8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    uint8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(uint8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // The packed weights are padded so K can be consumed in groups of 4 bytes.
  kc = round_up_po2(kc, 4 * sizeof(uint8_t));
  const uint8_t* a0 = a;
  uint8_t* c0 = c;

  // Broadcast kernel zero point; dotted against activations to build the
  // zero-point correction term accumulated in vnacc0.
  const uint8x8_t va_zero_point = vld1_dup_u8(&params->rndnu_neon.kernel_zero_point[0]);

  // Loop over groups of 16 columns.
  do {
    // Initialize accumulators with bias. 16 bias values are loaded from the
    // weight matrix, at the start of the group of 16 columns.
    uint32x4_t vpacc0x0123 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
    uint32x4_t vpacc0x4567 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
    uint32x4_t vpacc0x89AB = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
    uint32x4_t vpacc0xCDEF = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
    uint32x2_t vnacc0 = vmov_n_u32(0);

    // Inner accumulation loop along the 16 columns.
    size_t k = kc;
    // 2x partial unrolled loop to load 8 bytes at a time.
    while (k >= 8 * sizeof(uint8_t)) {
      // Load a 1x8 block of activations.
      const uint8x8_t va0x01234567 = vld1_u8(a0); a0 += 8;

      // Load a 8x16 block of weights.
      const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb0123x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb0123xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb4567x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb4567x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb4567x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb4567xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);

      // Multiply-accumulate: 1x8 * 8x16 --> 1x16.
      vnacc0 = vdot_u32(vnacc0, va_zero_point, va0x01234567);
      vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
      vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
      vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb0123x89AB, va0x01234567, 0);
      vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
      vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb4567x0123, va0x01234567, 1);
      vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb4567x4567, va0x01234567, 1);
      vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb4567x89AB, va0x01234567, 1);
      vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb4567xCDEF, va0x01234567, 1);

      k -= 8 * sizeof(uint8_t);
    }
    // Handle up to 4 final positions of `k`
    if XNN_UNLIKELY(k != 0) {
      // Load a 1x4 block of activations (upper lane zeroed).
      const uint8x8_t va0x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a0, vmov_n_u32(0), 0)); a0 += 4;

      // Load a 4x16 block of weights.
      const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb0123x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb0123xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);

      // Multiply-accumulate: 1x4 * 4x16 --> 1x16.
      vnacc0 = vdot_u32(vnacc0, va_zero_point, va0x01234567);
      vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
      vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
      vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb0123x89AB, va0x01234567, 0);
      vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
    }

    // Subtract zero point from accumulators: reduce vnacc0 to a scalar sum,
    // broadcast it, and subtract from every column's accumulator.
    vnacc0 = vpadd_u32(vnacc0, vnacc0);
    const uint32x4_t vnacc0x0123 = vcombine_u32(vnacc0, vnacc0);
    int32x4_t vacc0x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x0123, vnacc0x0123));
    int32x4_t vacc0x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x4567, vnacc0x0123));
    int32x4_t vacc0x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc0x89AB, vnacc0x0123));
    int32x4_t vacc0xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc0xCDEF, vnacc0x0123));

    // rndnu requantization: saturating pre-shift, doubling-high multiply,
    // rounding post-shift.
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    // Saturating shift (was non-saturating vshlq_s32 — fixed).
    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
    vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);

    // Narrow with saturation, add output zero point, and clamp.
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);

    uint8x16_t vout0x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);

    uint8x16_t vout0x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc0x01234567), vqmovun_s16(vacc0x89ABCDEF));
#endif
    const uint8x16_t voutput_min = vld1q_dup_u8(&params->rndnu_neon.output_min);
    const uint8x16_t voutput_max = vld1q_dup_u8(&params->rndnu_neon.output_max);

    vout0x0123456789ABCDEF = vmaxq_u8(vout0x0123456789ABCDEF, voutput_min);

    vout0x0123456789ABCDEF = vminq_u8(vout0x0123456789ABCDEF, voutput_max);

    if (nc >= 16) {
      // Full 16-column store; rewind A for the next column group.
      vst1q_u8(c0 + 0, vout0x0123456789ABCDEF);

      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);

      a0 = (const uint8_t*) ((uintptr_t) a0 - kc);

      nc -= 16;
    } else {
      // Partial store of the final 1..15 columns, in 8/4/2/1 chunks.
      uint8x8_t vout0x01234567 = vget_low_u8(vout0x0123456789ABCDEF);
      if (nc & 8) {
        vst1_u8(c0, vout0x01234567); c0 += 8;
        vout0x01234567 = vget_high_u8(vout0x0123456789ABCDEF);
      }
      if (nc & 4) {
        vst1_lane_u32((void*) c0, vreinterpret_u32_u8(vout0x01234567), 0); c0 += 4;
        vout0x01234567 = vext_u8(vout0x01234567, vout0x01234567, 4);
      }
      if (nc & 2) {
        vst1_lane_u16((void*) c0, vreinterpret_u16_u8(vout0x01234567), 0); c0 += 2;
        vout0x01234567 = vext_u8(vout0x01234567, vout0x01234567, 2);
      }
      if (nc & 1) {
        vst1_lane_u8(c0, vout0x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
| 8,124
| 44.138889
| 130
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gemm/gen/qu8-gemm-1x1c4-minmax-fp32-armsimd32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c4-armsimd32.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_acle.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
#include <xnnpack/unaligned.h>
// QU8 GEMM microkernel computing a 1x1 output using Armv6 SIMD32 (ACLE)
// intrinsics: __smlad dual 16-bit multiply-accumulate, with fp32 "fmagic"
// requantization and flag-based (__usub8/__sel) byte clamping.
void xnn_qu8_gemm_minmax_fp32_ukernel_1x1c4__armsimd32(
    size_t mr,
    size_t nc,
    size_t kc,
    const uint8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    uint8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);

  // Packed weights are padded so K is consumed 4 bytes at a time.
  kc = round_up_po2(kc, 4 * sizeof(uint8_t));
  const uint8_t* a0 = a;
  uint8_t* c0 = c;

  // minus_kernel_zero_point is pre-negated and packed as two 16-bit lanes,
  // so __uxtab16 below yields (weight_byte - kernel_zero_point) directly.
  const int16x2_t vb_minus_zero_point = (int16x2_t) params->fp32_armsimd32.minus_kernel_zero_point;
  const float vscale = params->fp32_armsimd32.scale;
  const float vmagic_bias = params->fp32_armsimd32.magic_bias;
  do {
    // Seed the accumulator with the int32 bias that precedes the weights.
    int32_t vacc0x0 = ((const int32_t*) w)[0];
    w = (const void*) ((const int32_t*) w + 1);

    size_t k = kc;
    do {
      // Load 4 activation bytes; split into even (0,2) and odd (1,3) byte
      // pairs zero-extended to 16 bits each.
      const int8x4_t va0 = (int8x4_t) unaligned_load_s32(a0); a0 += 4;

      const int16x2_t va0c02 = __uxtb16(va0);
      const int16x2_t va0c13 = __uxtb16(__ror(va0, 8));

      // Load 4 weight bytes; __uxtab16 zero-extends and adds the negated
      // kernel zero point, giving signed 16-bit (w - zp) pairs.
      const int8x4_t vb0 = *((const int8x4_t*) w); w = (const int8_t*) w + 4;
      const int16x2_t vb0c02 = __uxtab16(vb_minus_zero_point, vb0);

      // Dual 16x16 multiply-accumulate: acc += a0*b0 + a2*b2, then odd pairs.
      vacc0x0 = __smlad(va0c02, vb0c02, vacc0x0);

      const int16x2_t vb0c13 = __uxtab16(vb_minus_zero_point, __ror(vb0, 8));
      vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);

      k -= 4 * sizeof(uint8_t);
    } while (k != 0);

    // fp32 "fmagic" requantization: scale, add magic bias so the integer
    // result sits in the float's low mantissa bits, then recover it.
    float vfpacc0x0 = (float) vacc0x0;

    vfpacc0x0 *= vscale;

    vfpacc0x0 += vmagic_bias;

    int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0);

    const int32_t vmagic_bias_less_zero_point = params->fp32_armsimd32.magic_bias_less_zero_point;
    // __qsub saturates, __usat clamps to the unsigned 8-bit range.
    vout0x0 = __qsub(vout0x0, vmagic_bias_less_zero_point);

    vout0x0 = __usat(vout0x0, 8);

    const uint32_t vout0 = (uint32_t) vout0x0;
    uint32_t vout = vout0;

    // Byte-wise max with output_min: __usub8 sets the GE flags per byte and
    // __sel picks vout where vout >= min, else min.
    const int8x4_t voutput_min = (int8x4_t) params->fp32_armsimd32.output_min;
    __usub8((int8x4_t) vout, voutput_min);
    vout = (uint32_t) __sel((uint8x4_t) vout, (uint8x4_t) voutput_min);

    // Byte-wise min with output_max via the same flag/select mechanism.
    const int8x4_t voutput_max = (int8x4_t) params->fp32_armsimd32.output_max;
    __usub8((int8x4_t) vout, voutput_max);
    vout = (uint32_t) __sel((uint8x4_t) voutput_max, (uint8x4_t) vout);

    // Store the single output byte and advance to the next column.
    *c0 = (uint8_t) vout;

    a0 = (const uint8_t*) ((uintptr_t) a0 - kc);

    c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);

    nc -= 1;
  } while (nc != 0);
}
| 2,837
| 27.38
| 99
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gemm/gen/qu8-gemm-1x2-minmax-fp32-scalar-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QU8 GEMM microkernel computing a 1x2 output tile in portable scalar code,
// with fp32 requantization using the "fmagic" (float magic-bias) trick.
void xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic(
    size_t mr,
    size_t nc,
    size_t kc,
    const uint8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    uint8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);

  const uint8_t* a0 = a;
  uint8_t* c0 = c;

  const int32_t bzp = params->fp32_scalar_fmagic.kernel_zero_point;
  do {
    // Seed both accumulators with the per-column int32 biases that precede
    // the packed weights.
    int32_t acc0 = unaligned_indexed_load_s32(w, 0);
    int32_t acc1 = unaligned_indexed_load_s32(w, 1);
    w = (const int32_t*) w + 2;

    // Multiply-accumulate along K, one activation byte per iteration;
    // weights are stored zero-point-offset, so subtract bzp on load.
    for (size_t k = kc; k != 0; k -= sizeof(uint8_t)) {
      const int32_t va = (int32_t) (uint32_t) *a0++;
      const uint8_t* wb = (const uint8_t*) w;
      acc0 += va * ((int32_t) (uint32_t) wb[0] - bzp);
      acc1 += va * ((int32_t) (uint32_t) wb[1] - bzp);
      w = wb + 2;
    }

    // fp32 requantization: scale, clamp in the float domain, then add a
    // magic bias so the rounded integer can be read straight out of the
    // float bit pattern, and finally remove bias + apply the zero point.
    const float scale = params->fp32_scalar_fmagic.scale;
    float facc0 = (float) acc0 * scale;
    float facc1 = (float) acc1 * scale;

    const float fmin = params->fp32_scalar_fmagic.output_min_less_zero_point;
    facc0 = math_max_f32(facc0, fmin);
    facc1 = math_max_f32(facc1, fmin);

    const float fmax = params->fp32_scalar_fmagic.output_max_less_zero_point;
    facc0 = math_min_f32(facc0, fmax);
    facc1 = math_min_f32(facc1, fmax);

    const float magic_bias = params->fp32_scalar_fmagic.magic_bias;
    facc0 += magic_bias;
    facc1 += magic_bias;

    const int32_t bias_less_zp = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
    const int32_t out0 = (int32_t) float_as_uint32(facc0) - bias_less_zp;
    const int32_t out1 = (int32_t) float_as_uint32(facc1) - bias_less_zp;

    // Store the 1x2 tile; rewind A and advance C, or handle the final
    // single-column remainder.
    if XNN_LIKELY(nc >= 2) {
      c0[0] = (uint8_t) out0;
      c0[1] = (uint8_t) out1;

      a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);

      nc -= 2;
    } else {
      if (nc & 1) {
        c0[0] = (uint8_t) out0;
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 2,919
| 28.795918
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gemm/gen/qu8-gemm-1x2-minmax-fp32-scalar-imagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QU8 GEMM microkernel computing a 1x2 output tile in portable scalar code,
// with fp32 requantization using the "imagic" (integer-domain magic-bias)
// trick: clamping happens on the reinterpreted integer bits.
void xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_imagic(
    size_t mr,
    size_t nc,
    size_t kc,
    const uint8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    uint8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);

  const uint8_t* a0 = a;
  uint8_t* c0 = c;

  const int32_t bzp = params->fp32_scalar_imagic.kernel_zero_point;
  do {
    // Seed both accumulators with the per-column int32 biases that precede
    // the packed weights.
    int32_t acc0 = unaligned_indexed_load_s32(w, 0);
    int32_t acc1 = unaligned_indexed_load_s32(w, 1);
    w = (const int32_t*) w + 2;

    // Multiply-accumulate along K, one activation byte per iteration;
    // weights are stored zero-point-offset, so subtract bzp on load.
    for (size_t k = kc; k != 0; k -= sizeof(uint8_t)) {
      const int32_t va = (int32_t) (uint32_t) *a0++;
      const uint8_t* wb = (const uint8_t*) w;
      acc0 += va * ((int32_t) (uint32_t) wb[0] - bzp);
      acc1 += va * ((int32_t) (uint32_t) wb[1] - bzp);
      w = wb + 2;
    }

    // fp32 requantization: scale, add a magic bias so the rounded result
    // lands in the float's low mantissa bits, reinterpret as int, clamp in
    // the integer domain, then strip the bias and apply the zero point.
    const float scale = params->fp32_scalar_imagic.scale;
    float facc0 = (float) acc0 * scale;
    float facc1 = (float) acc1 * scale;

    const float magic_bias = params->fp32_scalar_imagic.magic_bias;
    facc0 += magic_bias;
    facc1 += magic_bias;

    int32_t out0 = (int32_t) float_as_uint32(facc0);
    int32_t out1 = (int32_t) float_as_uint32(facc1);

    const int32_t magic_min = params->fp32_scalar_imagic.magic_min;
    out0 = math_max_s32(out0, magic_min);
    out1 = math_max_s32(out1, magic_min);

    const int32_t magic_max = params->fp32_scalar_imagic.magic_max;
    out0 = math_min_s32(out0, magic_max);
    out1 = math_min_s32(out1, magic_max);

    const int32_t bias_less_zp = params->fp32_scalar_imagic.magic_bias_less_zero_point;
    out0 -= bias_less_zp;
    out1 -= bias_less_zp;

    // Store the 1x2 tile; rewind A and advance C, or handle the final
    // single-column remainder.
    if XNN_LIKELY(nc >= 2) {
      c0[0] = (uint8_t) out0;
      c0[1] = (uint8_t) out1;

      a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);

      nc -= 2;
    } else {
      if (nc & 1) {
        c0[0] = (uint8_t) out0;
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 2,772
| 26.455446
| 102
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gemm/gen/qu8-gemm-1x2-minmax-fp32-scalar-lrintf.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QU8 GEMM microkernel computing a 1x2 output tile in portable scalar code,
// with fp32 requantization rounded via lrintf().
void xnn_qu8_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf(
    size_t mr,
    size_t nc,
    size_t kc,
    const uint8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    uint8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);

  const uint8_t* a0 = a;
  uint8_t* c0 = c;

  const int32_t bzp = params->fp32_scalar_lrintf.kernel_zero_point;
  do {
    // Seed both accumulators with the per-column int32 biases that precede
    // the packed weights.
    int32_t acc0 = unaligned_indexed_load_s32(w, 0);
    int32_t acc1 = unaligned_indexed_load_s32(w, 1);
    w = (const int32_t*) w + 2;

    // Multiply-accumulate along K, one activation byte per iteration;
    // weights are stored zero-point-offset, so subtract bzp on load.
    for (size_t k = kc; k != 0; k -= sizeof(uint8_t)) {
      const int32_t va = (int32_t) (uint32_t) *a0++;
      const uint8_t* wb = (const uint8_t*) w;
      acc0 += va * ((int32_t) (uint32_t) wb[0] - bzp);
      acc1 += va * ((int32_t) (uint32_t) wb[1] - bzp);
      w = wb + 2;
    }

    // fp32 requantization: scale, clamp in the float domain, round to the
    // nearest integer with lrintf, then add the output zero point.
    const float scale = params->fp32_scalar_lrintf.scale;
    float facc0 = (float) acc0 * scale;
    float facc1 = (float) acc1 * scale;

    const float fmin = params->fp32_scalar_lrintf.output_min_less_zero_point;
    facc0 = math_max_f32(facc0, fmin);
    facc1 = math_max_f32(facc1, fmin);

    const float fmax = params->fp32_scalar_lrintf.output_max_less_zero_point;
    facc0 = math_min_f32(facc0, fmax);
    facc1 = math_min_f32(facc1, fmax);

    const int32_t rnd0 = (int32_t) lrintf(facc0);
    const int32_t rnd1 = (int32_t) lrintf(facc1);

    const int32_t ozp = params->fp32_scalar_lrintf.output_zero_point;
    const int32_t out0 = rnd0 + ozp;
    const int32_t out1 = rnd1 + ozp;

    // Store the 1x2 tile; rewind A and advance C, or handle the final
    // single-column remainder.
    if XNN_LIKELY(nc >= 2) {
      c0[0] = (uint8_t) out0;
      c0[1] = (uint8_t) out1;

      a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);

      nc -= 2;
    } else {
      if (nc & 1) {
        c0[0] = (uint8_t) out0;
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 2,812
| 27.704082
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gemm/gen/qu8-gemm-1x2-minmax-fp32-wasm-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QU8 GEMM microkernel computing a 1x2 output tile for WebAssembly, with
// fp32 "fmagic" requantization; float clamping uses the wasm min/max
// builtins, which lower to single instructions.
void xnn_qu8_gemm_minmax_fp32_ukernel_1x2__wasm_fmagic(
    size_t mr,
    size_t nc,
    size_t kc,
    const uint8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    uint8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);

  const uint8_t* a0 = a;
  uint8_t* c0 = c;

  const int32_t bzp = params->fp32_scalar_fmagic.kernel_zero_point;
  do {
    // Seed both accumulators with the per-column int32 biases that precede
    // the packed weights.
    int32_t acc0 = unaligned_indexed_load_s32(w, 0);
    int32_t acc1 = unaligned_indexed_load_s32(w, 1);
    w = (const int32_t*) w + 2;

    // Multiply-accumulate along K, one activation byte per iteration;
    // weights are stored zero-point-offset, so subtract bzp on load.
    for (size_t k = kc; k != 0; k -= sizeof(uint8_t)) {
      const int32_t va = (int32_t) (uint32_t) *a0++;
      const uint8_t* wb = (const uint8_t*) w;
      acc0 += va * ((int32_t) (uint32_t) wb[0] - bzp);
      acc1 += va * ((int32_t) (uint32_t) wb[1] - bzp);
      w = wb + 2;
    }

    // fp32 requantization: scale, clamp in the float domain, then add a
    // magic bias so the rounded integer can be read straight out of the
    // float bit pattern, and finally remove bias + apply the zero point.
    const float scale = params->fp32_scalar_fmagic.scale;
    float facc0 = (float) acc0 * scale;
    float facc1 = (float) acc1 * scale;

    const float fmin = params->fp32_scalar_fmagic.output_min_less_zero_point;
    facc0 = __builtin_wasm_max_f32(facc0, fmin);
    facc1 = __builtin_wasm_max_f32(facc1, fmin);

    const float fmax = params->fp32_scalar_fmagic.output_max_less_zero_point;
    facc0 = __builtin_wasm_min_f32(facc0, fmax);
    facc1 = __builtin_wasm_min_f32(facc1, fmax);

    const float magic_bias = params->fp32_scalar_fmagic.magic_bias;
    facc0 += magic_bias;
    facc1 += magic_bias;

    const int32_t bias_less_zp = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
    const int32_t out0 = (int32_t) float_as_uint32(facc0) - bias_less_zp;
    const int32_t out1 = (int32_t) float_as_uint32(facc1) - bias_less_zp;

    // Store the 1x2 tile; rewind A and advance C, or handle the final
    // single-column remainder.
    if XNN_LIKELY(nc >= 2) {
      c0[0] = (uint8_t) out0;
      c0[1] = (uint8_t) out1;

      a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);

      nc -= 2;
    } else {
      if (nc & 1) {
        c0[0] = (uint8_t) out0;
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 2,957
| 29.183673
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gemm/gen/qu8-gemm-1x2-minmax-rndnu-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QU8 GEMM microkernel (1 row x 2 columns) using the "rndnu" fixed-point
// requantization: widen the int32 accumulator to 64 bits, multiply, add a
// rounding term, arithmetic-shift right, clamp, and re-center on the output
// zero point.
void xnn_qu8_gemm_minmax_rndnu_ukernel_1x2__scalar(
    size_t mr,
    size_t nc,
    size_t kc,
    const uint8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    uint8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);

  const uint8_t* a_row = a;
  uint8_t* c_row = c;

  // Zero point of the packed kernel, subtracted from every weight byte.
  const int32_t kernel_zero_point = params->rndnu_scalar.kernel_zero_point;
  do {
    // The packed weight stream starts with two int32 bias values per column pair.
    int32_t acc_n0 = unaligned_indexed_load_s32(w, 0);
    int32_t acc_n1 = unaligned_indexed_load_s32(w, 1);
    w = (const int32_t*) w + 2;

    // Dot product over the K dimension, one activation byte per iteration.
    size_t remaining = kc;
    do {
      const int32_t activation = (int32_t) (uint32_t) *a_row++;
      const int32_t weight_n0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - kernel_zero_point;
      const int32_t weight_n1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - kernel_zero_point;
      w = (const uint8_t*) w + 2;

      acc_n0 += activation * weight_n0;
      acc_n1 += activation * weight_n1;

      remaining -= sizeof(uint8_t);
    } while (remaining != 0);

    // Fixed-point rescale: (acc * multiplier + rounding) >> shift, in 64 bits.
    const int32_t multiplier = params->rndnu_scalar.multiplier;
    const int64_t rounding = params->rndnu_scalar.rounding;
    const int64_t ext_n0 = math_mulext_s32(acc_n0, multiplier) + rounding;
    const int64_t ext_n1 = math_mulext_s32(acc_n1, multiplier) + rounding;

    const uint32_t shift = params->rndnu_scalar.shift;
    int32_t out_n0 = (int32_t) math_asr_s64(ext_n0, shift);
    int32_t out_n1 = (int32_t) math_asr_s64(ext_n1, shift);

    // Clamp (limits are pre-shifted by the output zero point), then re-center.
    const int32_t output_min = params->rndnu_scalar.output_min_less_zero_point;
    out_n0 = math_max_s32(out_n0, output_min);
    out_n1 = math_max_s32(out_n1, output_min);
    const int32_t output_max = params->rndnu_scalar.output_max_less_zero_point;
    out_n0 = math_min_s32(out_n0, output_max);
    out_n1 = math_min_s32(out_n1, output_max);
    const int32_t output_zero_point = params->rndnu_scalar.output_zero_point;
    out_n0 += output_zero_point;
    out_n1 += output_zero_point;

    if XNN_LIKELY(nc >= 2) {
      c_row[0] = (uint8_t) out_n0;
      c_row[1] = (uint8_t) out_n1;

      // Rewind A to the row start; advance C to the next column group.
      a_row = (const uint8_t*) ((uintptr_t) a_row - kc);
      c_row = (uint8_t*) ((uintptr_t) c_row + cn_stride);

      nc -= 2;
    } else {
      // Tail: a single remaining column.
      if (nc & 1) {
        c_row[0] = (uint8_t) out_n0;
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 2,882
| 29.03125
| 96
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gemm/gen/qu8-gemm-1x2c4-minmax-fp32-armsimd32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c4-armsimd32.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_acle.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
#include <xnnpack/unaligned.h>
// QU8 GEMM microkernel (1 row x 2 columns, 4 K-channels per iteration) for
// ARMv6 SIMD32: processes 4 activation/weight bytes at a time with __smlad
// dual multiply-accumulate, requantizes via the fp32 magic-bias trick, and
// clamps with byte-wise __usat/__usub8/__sel selection.
void xnn_qu8_gemm_minmax_fp32_ukernel_1x2c4__armsimd32(
    size_t mr,
    size_t nc,
    size_t kc,
    const uint8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    uint8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  // kc is padded to a multiple of 4 so whole 32-bit words can be loaded.
  kc = round_up_po2(kc, 4 * sizeof(uint8_t));
  const uint8_t* a0 = a;
  uint8_t* c0 = c;
  // Negated kernel zero point, replicated into both 16-bit lanes; folded into
  // the weight widening below via __uxtab16.
  const int16x2_t vb_minus_zero_point = (int16x2_t) params->fp32_armsimd32.minus_kernel_zero_point;
  const float vscale = params->fp32_armsimd32.scale;
  const float vmagic_bias = params->fp32_armsimd32.magic_bias;
  do {
    // Packed weights begin with two int32 bias values for this column pair.
    int32_t vacc0x0 = ((const int32_t*) w)[0];
    int32_t vacc0x1 = ((const int32_t*) w)[1];
    w = (const void*) ((const int32_t*) w + 2);
    size_t k = kc;
    do {
      // Load 4 activation bytes as one word, then split them into even (0,2)
      // and odd (1,3) 16-bit lanes with __uxtb16 (the __ror exposes the odd bytes).
      const int8x4_t va0 = (int8x4_t) unaligned_load_s32(a0); a0 += 4;
      const int16x2_t va0c02 = __uxtb16(va0);
      const int16x2_t va0c13 = __uxtb16(__ror(va0, 8));
      // Column 0: widen weight bytes while adding the negated zero point, then
      // accumulate two 16x16 products per __smlad.
      const int8x4_t vb0 = *((const int8x4_t*) w); w = (const int8_t*) w + 4;
      const int16x2_t vb0c02 = __uxtab16(vb_minus_zero_point, vb0);
      vacc0x0 = __smlad(va0c02, vb0c02, vacc0x0);
      const int16x2_t vb0c13 = __uxtab16(vb_minus_zero_point, __ror(vb0, 8));
      vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);
      // Column 1: same pattern with the next 4 weight bytes.
      const int8x4_t vb1 = *((const int8x4_t*) w); w = (const int8_t*) w + 4;
      const int16x2_t vb1c02 = __uxtab16(vb_minus_zero_point, vb1);
      vacc0x1 = __smlad(va0c02, vb1c02, vacc0x1);
      const int16x2_t vb1c13 = __uxtab16(vb_minus_zero_point, __ror(vb1, 8));
      vacc0x1 = __smlad(va0c13, vb1c13, vacc0x1);
      k -= 4 * sizeof(uint8_t);
    } while (k != 0);
    // Requantize: rescale in float, add the magic bias, and recover the
    // integer from the float bit pattern (__qsub saturates the subtraction).
    float vfpacc0x0 = (float) vacc0x0;
    float vfpacc0x1 = (float) vacc0x1;
    vfpacc0x0 *= vscale;
    vfpacc0x1 *= vscale;
    vfpacc0x0 += vmagic_bias;
    vfpacc0x1 += vmagic_bias;
    int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0);
    int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1);
    const int32_t vmagic_bias_less_zero_point = params->fp32_armsimd32.magic_bias_less_zero_point;
    vout0x0 = __qsub(vout0x0, vmagic_bias_less_zero_point);
    vout0x1 = __qsub(vout0x1, vmagic_bias_less_zero_point);
    // Saturate to the unsigned 8-bit range, then pack both results into one word.
    vout0x0 = __usat(vout0x0, 8);
    vout0x1 = __usat(vout0x1, 8);
    const uint32_t vout0 = (uint32_t) (uint8_t) vout0x0 | ((uint32_t) vout0x1 << 8);
    uint32_t vout = vout0;
    // Byte-wise clamping: __usub8 sets per-byte GE flags, __sel then picks the
    // max(vout, min) and min(vout, max) bytes respectively.
    const int8x4_t voutput_min = (int8x4_t) params->fp32_armsimd32.output_min;
    __usub8((int8x4_t) vout, voutput_min);
    vout = (uint32_t) __sel((uint8x4_t) vout, (uint8x4_t) voutput_min);
    const int8x4_t voutput_max = (int8x4_t) params->fp32_armsimd32.output_max;
    __usub8((int8x4_t) vout, voutput_max);
    vout = (uint32_t) __sel((uint8x4_t) voutput_max, (uint8x4_t) vout);
    if XNN_LIKELY(nc >= 2) {
      // Store both bytes at once; rewind A to the row start, advance C.
      unaligned_store_u16(c0, (uint16_t) vout);
      a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
      nc -= 2;
    } else {
      // nc == 1: store only the low byte.
      *c0 = (uint8_t) vout;
      nc = 0;
    }
  } while (nc != 0);
}
| 3,615
| 29.133333
| 99
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gemm/gen/qu8-gemm-1x32c4-minmax-rndnu-neondot.c
|
// Auto-generated file. Do not edit!
// Template: src/qu8-gemm/c4-neondot.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
// QU8 GEMM microkernel (1 row x 32 columns, 4 K-channels per dot product) for
// NEON with the UDOT extension. Positive products accumulate in vpacc*; the
// kernel-zero-point correction (zero_point * sum(activations)) accumulates in
// vnacc and is subtracted afterwards. Requantization uses the "rndnu" path:
// pre-shift, saturating doubling multiply, rounding post-shift.
void xnn_qu8_gemm_minmax_rndnu_ukernel_1x32c4__neondot(
    size_t mr,
    size_t nc,
    size_t kc,
    const uint8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    uint8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(uint8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  kc = round_up_po2(kc, 4 * sizeof(uint8_t));
  const uint8_t* a0 = a;
  uint8_t* c0 = c;
  // NOTE: despite the name, this broadcasts the KERNEL zero point; it is dotted
  // with the activations below to build the zero-point correction term.
  const uint8x8_t va_zero_point = vld1_dup_u8(&params->rndnu_neon.kernel_zero_point[0]);
  // Loop over groups of 32 columns.
  do {
    // Initialize accumulators with bias. 32 bias values are loaded from the
    // weight matrix, at the start of the group of 32 columns.
    uint32x4_t vpacc0x0123 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
    uint32x4_t vpacc0x4567 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
    uint32x4_t vpacc0x89AB = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
    uint32x4_t vpacc0xCDEF = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
    uint32x4_t vpacc0xGHIJ = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
    uint32x4_t vpacc0xKLMN = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
    uint32x4_t vpacc0xOPQR = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
    uint32x4_t vpacc0xSTUV = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
    // Accumulator for kernel_zero_point * sum(activations).
    uint32x2_t vnacc0 = vmov_n_u32(0);
    // Inner accumulation loop along the 32 columns.
    size_t k = kc;
    // 2x partial unrolled loop to load 8 bytes at a time.
    while (k >= 8 * sizeof(uint8_t)) {
      // Load a 1x8 block of activations.
      const uint8x8_t va0x01234567 = vld1_u8(a0); a0 += 8;
      // Load a 8x32 block of weights.
      const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb0123x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb0123xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb0123xGHIJ = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb0123xKLMN = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb0123xOPQR = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb0123xSTUV = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb4567x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb4567x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb4567x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb4567xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb4567xGHIJ = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb4567xKLMN = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb4567xOPQR = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb4567xSTUV = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      // Multiply-accumulate: 1x8 * 8x32 --> 1x32.
      vnacc0 = vdot_u32(vnacc0, va_zero_point, va0x01234567);
      vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
      vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
      vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb0123x89AB, va0x01234567, 0);
      vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
      vpacc0xGHIJ = vdotq_lane_u32(vpacc0xGHIJ, vb0123xGHIJ, va0x01234567, 0);
      vpacc0xKLMN = vdotq_lane_u32(vpacc0xKLMN, vb0123xKLMN, va0x01234567, 0);
      vpacc0xOPQR = vdotq_lane_u32(vpacc0xOPQR, vb0123xOPQR, va0x01234567, 0);
      vpacc0xSTUV = vdotq_lane_u32(vpacc0xSTUV, vb0123xSTUV, va0x01234567, 0);
      vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb4567x0123, va0x01234567, 1);
      vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb4567x4567, va0x01234567, 1);
      vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb4567x89AB, va0x01234567, 1);
      vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb4567xCDEF, va0x01234567, 1);
      vpacc0xGHIJ = vdotq_lane_u32(vpacc0xGHIJ, vb4567xGHIJ, va0x01234567, 1);
      vpacc0xKLMN = vdotq_lane_u32(vpacc0xKLMN, vb4567xKLMN, va0x01234567, 1);
      vpacc0xOPQR = vdotq_lane_u32(vpacc0xOPQR, vb4567xOPQR, va0x01234567, 1);
      vpacc0xSTUV = vdotq_lane_u32(vpacc0xSTUV, vb4567xSTUV, va0x01234567, 1);
      k -= 8 * sizeof(uint8_t);
    }
    // Handle up to 4 final positions of `k`
    if XNN_UNLIKELY(k != 0) {
      // Load a 1x4 block of activations.
      // (4 bytes are loaded into the low half of the vector; only lane 0 is used below.)
      const uint8x8_t va0x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a0, vmov_n_u32(0), 0)); a0 += 4;
      // Load a 4x32 block of weights.
      const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb0123x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb0123xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb0123xGHIJ = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb0123xKLMN = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb0123xOPQR = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb0123xSTUV = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      // Multiply-accumulate: 1x4 * 4x32 --> 1x32.
      vnacc0 = vdot_u32(vnacc0, va_zero_point, va0x01234567);
      vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
      vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
      vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb0123x89AB, va0x01234567, 0);
      vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
      vpacc0xGHIJ = vdotq_lane_u32(vpacc0xGHIJ, vb0123xGHIJ, va0x01234567, 0);
      vpacc0xKLMN = vdotq_lane_u32(vpacc0xKLMN, vb0123xKLMN, va0x01234567, 0);
      vpacc0xOPQR = vdotq_lane_u32(vpacc0xOPQR, vb0123xOPQR, va0x01234567, 0);
      vpacc0xSTUV = vdotq_lane_u32(vpacc0xSTUV, vb0123xSTUV, va0x01234567, 0);
    }
    // Subtract zero point from accumulators.
    // vnacc holds kernel_zero_point * sum(a); broadcast it and subtract from
    // every positive accumulator, reinterpreting the result as signed.
    vnacc0 = vpadd_u32(vnacc0, vnacc0);
    const uint32x4_t vnacc0x0123 = vcombine_u32(vnacc0, vnacc0);
    vacc0x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x0123, vnacc0x0123));
    int32x4_t vacc0x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x4567, vnacc0x0123));
    int32x4_t vacc0x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc0x89AB, vnacc0x0123));
    int32x4_t vacc0xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc0xCDEF, vnacc0x0123));
    int32x4_t vacc0xGHIJ = vreinterpretq_s32_u32(vsubq_u32(vpacc0xGHIJ, vnacc0x0123));
    int32x4_t vacc0xKLMN = vreinterpretq_s32_u32(vsubq_u32(vpacc0xKLMN, vnacc0x0123));
    int32x4_t vacc0xOPQR = vreinterpretq_s32_u32(vsubq_u32(vpacc0xOPQR, vnacc0x0123));
    int32x4_t vacc0xSTUV = vreinterpretq_s32_u32(vsubq_u32(vpacc0xSTUV, vnacc0x0123));
    // rndnu requantization: left pre-shift, saturating doubling multiply-high,
    // then rounding right post-shift (both shifts are vshlq with signed amounts).
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
    vacc0xGHIJ = vshlq_s32(vacc0xGHIJ, vright_pre_shift);
    vacc0xKLMN = vshlq_s32(vacc0xKLMN, vright_pre_shift);
    vacc0xOPQR = vshlq_s32(vacc0xOPQR, vright_pre_shift);
    vacc0xSTUV = vshlq_s32(vacc0xSTUV, vright_pre_shift);
    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
    vacc0xGHIJ = vqdmulhq_s32(vacc0xGHIJ, vmultiplier);
    vacc0xKLMN = vqdmulhq_s32(vacc0xKLMN, vmultiplier);
    vacc0xOPQR = vqdmulhq_s32(vacc0xOPQR, vmultiplier);
    vacc0xSTUV = vqdmulhq_s32(vacc0xSTUV, vmultiplier);
    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
    vacc0xGHIJ = vrshlq_s32(vacc0xGHIJ, vright_post_shift);
    vacc0xKLMN = vrshlq_s32(vacc0xKLMN, vright_post_shift);
    vacc0xOPQR = vrshlq_s32(vacc0xOPQR, vright_post_shift);
    vacc0xSTUV = vrshlq_s32(vacc0xSTUV, vright_post_shift);
    // Narrow int32 -> int16 (adding the output zero point with saturation),
    // then int16 -> uint8 with saturation. AArch64 uses the *_high forms.
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
    const int16x8_t vacc0xGHIJKLMN = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0xGHIJ), vacc0xKLMN), voutput_zero_point);
    const int16x8_t vacc0xOPQRSTUV = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0xOPQR), vacc0xSTUV), voutput_zero_point);
    uint8x16_t vout0x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc0x01234567), vacc0x89ABCDEF);
    uint8x16_t vout0xGHIJKLMNOPQRSTUV = vqmovun_high_s16(vqmovun_s16(vacc0xGHIJKLMN), vacc0xOPQRSTUV);
#else
    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
    const int16x8_t vacc0xGHIJKLMN = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0xGHIJ), vqmovn_s32(vacc0xKLMN)), voutput_zero_point);
    const int16x8_t vacc0xOPQRSTUV = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0xOPQR), vqmovn_s32(vacc0xSTUV)), voutput_zero_point);
    uint8x16_t vout0x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc0x01234567), vqmovun_s16(vacc0x89ABCDEF));
    uint8x16_t vout0xGHIJKLMNOPQRSTUV = vcombine_u8(vqmovun_s16(vacc0xGHIJKLMN), vqmovun_s16(vacc0xOPQRSTUV));
#endif
    // Final clamp to [output_min, output_max].
    const uint8x16_t voutput_min = vld1q_dup_u8(&params->rndnu_neon.output_min);
    const uint8x16_t voutput_max = vld1q_dup_u8(&params->rndnu_neon.output_max);
    vout0x0123456789ABCDEF = vmaxq_u8(vout0x0123456789ABCDEF, voutput_min);
    vout0xGHIJKLMNOPQRSTUV = vmaxq_u8(vout0xGHIJKLMNOPQRSTUV, voutput_min);
    vout0x0123456789ABCDEF = vminq_u8(vout0x0123456789ABCDEF, voutput_max);
    vout0xGHIJKLMNOPQRSTUV = vminq_u8(vout0xGHIJKLMNOPQRSTUV, voutput_max);
    if (nc >= 32) {
      vst1q_u8(c0 + 0, vout0x0123456789ABCDEF);
      vst1q_u8(c0 + 16, vout0xGHIJKLMNOPQRSTUV);
      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
      a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
      nc -= 32;
    } else {
      // Tail: store progressively smaller pieces (16/8/4/2/1 bytes), sliding
      // the remaining results down after each partial store.
      if (nc & 16) {
        vst1q_u8(c0, vout0x0123456789ABCDEF); c0 += 16;
        vout0x0123456789ABCDEF = vout0xGHIJKLMNOPQRSTUV;
      }
      uint8x8_t vout0x01234567 = vget_low_u8(vout0x0123456789ABCDEF);
      if (nc & 8) {
        vst1_u8(c0, vout0x01234567); c0 += 8;
        vout0x01234567 = vget_high_u8(vout0x0123456789ABCDEF);
      }
      if (nc & 4) {
        vst1_lane_u32((void*) c0, vreinterpret_u32_u8(vout0x01234567), 0); c0 += 4;
        vout0x01234567 = vext_u8(vout0x01234567, vout0x01234567, 4);
      }
      if (nc & 2) {
        vst1_lane_u16((void*) c0, vreinterpret_u16_u8(vout0x01234567), 0); c0 += 2;
        vout0x01234567 = vext_u8(vout0x01234567, vout0x01234567, 2);
      }
      if (nc & 1) {
        vst1_lane_u8(c0, vout0x01234567, 0);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 12,673
| 52.252101
| 130
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gemm/gen/qu8-gemm-1x4-minmax-fp32-scalar-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
// QU8 GEMM microkernel (1 row x 4 columns) with int32 accumulation and fp32
// "fmagic" requantization: scale in float, clamp, add a magic bias, and
// reinterpret the float bits to recover the quantized integer.
void xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic(
    size_t mr,
    size_t nc,
    size_t kc,
    const uint8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    uint8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);

  const uint8_t* a_row = a;
  uint8_t* c_row = c;

  // Zero point of the packed kernel, subtracted from every weight byte.
  const int32_t kernel_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
  do {
    // Packed weights begin with four int32 biases for this column quartet.
    int32_t acc_n0 = ((const int32_t*) w)[0];
    int32_t acc_n1 = ((const int32_t*) w)[1];
    int32_t acc_n2 = ((const int32_t*) w)[2];
    int32_t acc_n3 = ((const int32_t*) w)[3];
    w = (const int32_t*) w + 4;

    // Dot product over the K dimension, one activation byte per iteration.
    size_t remaining = kc;
    do {
      const int32_t activation = (int32_t) (uint32_t) *a_row++;
      const int32_t weight_n0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - kernel_zero_point;
      const int32_t weight_n1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - kernel_zero_point;
      const int32_t weight_n2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - kernel_zero_point;
      const int32_t weight_n3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - kernel_zero_point;
      w = (const uint8_t*) w + 4;

      acc_n0 += activation * weight_n0;
      acc_n1 += activation * weight_n1;
      acc_n2 += activation * weight_n2;
      acc_n3 += activation * weight_n3;

      remaining -= sizeof(uint8_t);
    } while (remaining != 0);

    // Requantization: rescale in single precision.
    float facc_n0 = (float) acc_n0;
    float facc_n1 = (float) acc_n1;
    float facc_n2 = (float) acc_n2;
    float facc_n3 = (float) acc_n3;
    const float scale = params->fp32_scalar_fmagic.scale;
    facc_n0 *= scale;
    facc_n1 *= scale;
    facc_n2 *= scale;
    facc_n3 *= scale;

    // Clamp to the output range (limits are pre-shifted by the output zero point).
    const float output_min = params->fp32_scalar_fmagic.output_min_less_zero_point;
    facc_n0 = math_max_f32(facc_n0, output_min);
    facc_n1 = math_max_f32(facc_n1, output_min);
    facc_n2 = math_max_f32(facc_n2, output_min);
    facc_n3 = math_max_f32(facc_n3, output_min);
    const float output_max = params->fp32_scalar_fmagic.output_max_less_zero_point;
    facc_n0 = math_min_f32(facc_n0, output_max);
    facc_n1 = math_min_f32(facc_n1, output_max);
    facc_n2 = math_min_f32(facc_n2, output_max);
    facc_n3 = math_min_f32(facc_n3, output_max);

    // "Float magic" rounding: after adding the magic bias the rounded integer
    // sits in the low bits of the float representation; reinterpret and
    // subtract the pre-combined constant (which also restores the zero point).
    const float magic_bias = params->fp32_scalar_fmagic.magic_bias;
    facc_n0 += magic_bias;
    facc_n1 += magic_bias;
    facc_n2 += magic_bias;
    facc_n3 += magic_bias;
    const int32_t magic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
    int32_t out_n0 = (int32_t) float_as_uint32(facc_n0) - magic_bias_less_output_zero_point;
    int32_t out_n1 = (int32_t) float_as_uint32(facc_n1) - magic_bias_less_output_zero_point;
    int32_t out_n2 = (int32_t) float_as_uint32(facc_n2) - magic_bias_less_output_zero_point;
    int32_t out_n3 = (int32_t) float_as_uint32(facc_n3) - magic_bias_less_output_zero_point;

    if XNN_LIKELY(nc >= 4) {
      c_row[0] = (uint8_t) out_n0;
      c_row[1] = (uint8_t) out_n1;
      c_row[2] = (uint8_t) out_n2;
      c_row[3] = (uint8_t) out_n3;

      // Rewind A to the row start; advance C to the next column group.
      a_row = (const uint8_t*) ((uintptr_t) a_row - kc);
      c_row = (uint8_t*) ((uintptr_t) c_row + cn_stride);

      nc -= 4;
    } else {
      // Tail: store pairs then a single value, sliding out_n2 into slot 0.
      if (nc & 2) {
        c_row[0] = (uint8_t) out_n0;
        c_row[1] = (uint8_t) out_n1;
        out_n0 = out_n2;
        c_row += 2;
      }
      if (nc & 1) {
        c_row[0] = (uint8_t) out_n0;
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 4,066
| 32.065041
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gemm/gen/qu8-gemm-1x4-minmax-fp32-scalar-imagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
// QU8 GEMM microkernel (1 row x 4 columns) with int32 accumulation and fp32
// "imagic" requantization: scale in float, add a magic bias, reinterpret the
// float bits, then clamp and re-center entirely in the integer domain.
void xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_imagic(
    size_t mr,
    size_t nc,
    size_t kc,
    const uint8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    uint8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);

  const uint8_t* a_row = a;
  uint8_t* c_row = c;

  // Zero point of the packed kernel, subtracted from every weight byte.
  const int32_t kernel_zero_point = params->fp32_scalar_imagic.kernel_zero_point;
  do {
    // Packed weights begin with four int32 biases for this column quartet.
    int32_t acc_n0 = ((const int32_t*) w)[0];
    int32_t acc_n1 = ((const int32_t*) w)[1];
    int32_t acc_n2 = ((const int32_t*) w)[2];
    int32_t acc_n3 = ((const int32_t*) w)[3];
    w = (const int32_t*) w + 4;

    // Dot product over the K dimension, one activation byte per iteration.
    size_t remaining = kc;
    do {
      const int32_t activation = (int32_t) (uint32_t) *a_row++;
      const int32_t weight_n0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - kernel_zero_point;
      const int32_t weight_n1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - kernel_zero_point;
      const int32_t weight_n2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - kernel_zero_point;
      const int32_t weight_n3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - kernel_zero_point;
      w = (const uint8_t*) w + 4;

      acc_n0 += activation * weight_n0;
      acc_n1 += activation * weight_n1;
      acc_n2 += activation * weight_n2;
      acc_n3 += activation * weight_n3;

      remaining -= sizeof(uint8_t);
    } while (remaining != 0);

    // Rescale in single precision and add the magic bias so the rounded
    // integer result lands in the low bits of the float representation.
    float facc_n0 = (float) acc_n0;
    float facc_n1 = (float) acc_n1;
    float facc_n2 = (float) acc_n2;
    float facc_n3 = (float) acc_n3;
    const float scale = params->fp32_scalar_imagic.scale;
    facc_n0 *= scale;
    facc_n1 *= scale;
    facc_n2 *= scale;
    facc_n3 *= scale;
    const float magic_bias = params->fp32_scalar_imagic.magic_bias;
    facc_n0 += magic_bias;
    facc_n1 += magic_bias;
    facc_n2 += magic_bias;
    facc_n3 += magic_bias;

    // Reinterpret bits, then clamp against the magic-biased min/max before
    // removing the bias to obtain the final quantized value.
    int32_t out_n0 = (int32_t) float_as_uint32(facc_n0);
    int32_t out_n1 = (int32_t) float_as_uint32(facc_n1);
    int32_t out_n2 = (int32_t) float_as_uint32(facc_n2);
    int32_t out_n3 = (int32_t) float_as_uint32(facc_n3);
    const int32_t magic_min = params->fp32_scalar_imagic.magic_min;
    out_n0 = math_max_s32(out_n0, magic_min);
    out_n1 = math_max_s32(out_n1, magic_min);
    out_n2 = math_max_s32(out_n2, magic_min);
    out_n3 = math_max_s32(out_n3, magic_min);
    const int32_t magic_max = params->fp32_scalar_imagic.magic_max;
    out_n0 = math_min_s32(out_n0, magic_max);
    out_n1 = math_min_s32(out_n1, magic_max);
    out_n2 = math_min_s32(out_n2, magic_max);
    out_n3 = math_min_s32(out_n3, magic_max);
    const int32_t magic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
    out_n0 -= magic_bias_less_zero_point;
    out_n1 -= magic_bias_less_zero_point;
    out_n2 -= magic_bias_less_zero_point;
    out_n3 -= magic_bias_less_zero_point;

    if XNN_LIKELY(nc >= 4) {
      c_row[0] = (uint8_t) out_n0;
      c_row[1] = (uint8_t) out_n1;
      c_row[2] = (uint8_t) out_n2;
      c_row[3] = (uint8_t) out_n3;

      // Rewind A to the row start; advance C to the next column group.
      a_row = (const uint8_t*) ((uintptr_t) a_row - kc);
      c_row = (uint8_t*) ((uintptr_t) c_row + cn_stride);

      nc -= 4;
    } else {
      // Tail: store pairs then a single value, sliding out_n2 into slot 0.
      if (nc & 2) {
        c_row[0] = (uint8_t) out_n0;
        c_row[1] = (uint8_t) out_n1;
        out_n0 = out_n2;
        c_row += 2;
      }
      if (nc & 1) {
        c_row[0] = (uint8_t) out_n0;
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 3,849
| 29.078125
| 102
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gemm/gen/qu8-gemm-1x4-minmax-fp32-scalar-lrintf.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
// QU8 GEMM microkernel (1 row x 4 columns) with int32 accumulation and fp32
// requantization that rounds via lrintf(): scale in float, clamp, round to
// nearest integer, then add the output zero point.
void xnn_qu8_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf(
    size_t mr,
    size_t nc,
    size_t kc,
    const uint8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    uint8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);

  const uint8_t* a_row = a;
  uint8_t* c_row = c;

  // Zero point of the packed kernel, subtracted from every weight byte.
  const int32_t kernel_zero_point = params->fp32_scalar_lrintf.kernel_zero_point;
  do {
    // Packed weights begin with four int32 biases for this column quartet.
    int32_t acc_n0 = ((const int32_t*) w)[0];
    int32_t acc_n1 = ((const int32_t*) w)[1];
    int32_t acc_n2 = ((const int32_t*) w)[2];
    int32_t acc_n3 = ((const int32_t*) w)[3];
    w = (const int32_t*) w + 4;

    // Dot product over the K dimension, one activation byte per iteration.
    size_t remaining = kc;
    do {
      const int32_t activation = (int32_t) (uint32_t) *a_row++;
      const int32_t weight_n0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - kernel_zero_point;
      const int32_t weight_n1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - kernel_zero_point;
      const int32_t weight_n2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - kernel_zero_point;
      const int32_t weight_n3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - kernel_zero_point;
      w = (const uint8_t*) w + 4;

      acc_n0 += activation * weight_n0;
      acc_n1 += activation * weight_n1;
      acc_n2 += activation * weight_n2;
      acc_n3 += activation * weight_n3;

      remaining -= sizeof(uint8_t);
    } while (remaining != 0);

    // Requantization: rescale in single precision.
    float facc_n0 = (float) acc_n0;
    float facc_n1 = (float) acc_n1;
    float facc_n2 = (float) acc_n2;
    float facc_n3 = (float) acc_n3;
    const float scale = params->fp32_scalar_lrintf.scale;
    facc_n0 *= scale;
    facc_n1 *= scale;
    facc_n2 *= scale;
    facc_n3 *= scale;

    // Clamp to the output range (limits are pre-shifted by the output zero point).
    const float output_min = params->fp32_scalar_lrintf.output_min_less_zero_point;
    facc_n0 = math_max_f32(facc_n0, output_min);
    facc_n1 = math_max_f32(facc_n1, output_min);
    facc_n2 = math_max_f32(facc_n2, output_min);
    facc_n3 = math_max_f32(facc_n3, output_min);
    const float output_max = params->fp32_scalar_lrintf.output_max_less_zero_point;
    facc_n0 = math_min_f32(facc_n0, output_max);
    facc_n1 = math_min_f32(facc_n1, output_max);
    facc_n2 = math_min_f32(facc_n2, output_max);
    facc_n3 = math_min_f32(facc_n3, output_max);

    // Round to the nearest integer and re-center on the output zero point.
    const int32_t rnd_n0 = (int32_t) lrintf(facc_n0);
    const int32_t rnd_n1 = (int32_t) lrintf(facc_n1);
    const int32_t rnd_n2 = (int32_t) lrintf(facc_n2);
    const int32_t rnd_n3 = (int32_t) lrintf(facc_n3);
    const int32_t output_zero_point = params->fp32_scalar_lrintf.output_zero_point;
    int32_t out_n0 = rnd_n0 + output_zero_point;
    int32_t out_n1 = rnd_n1 + output_zero_point;
    int32_t out_n2 = rnd_n2 + output_zero_point;
    int32_t out_n3 = rnd_n3 + output_zero_point;

    if XNN_LIKELY(nc >= 4) {
      c_row[0] = (uint8_t) out_n0;
      c_row[1] = (uint8_t) out_n1;
      c_row[2] = (uint8_t) out_n2;
      c_row[3] = (uint8_t) out_n3;

      // Rewind A to the row start; advance C to the next column group.
      a_row = (const uint8_t*) ((uintptr_t) a_row - kc);
      c_row = (uint8_t*) ((uintptr_t) c_row + cn_stride);

      nc -= 4;
    } else {
      // Tail: store pairs then a single value, sliding out_n2 into slot 0.
      if (nc & 2) {
        c_row[0] = (uint8_t) out_n0;
        c_row[1] = (uint8_t) out_n1;
        out_n0 = out_n2;
        c_row += 2;
      }
      if (nc & 1) {
        c_row[0] = (uint8_t) out_n0;
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 3,935
| 31
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gemm/gen/qu8-gemm-1x4-minmax-fp32-wasm-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
// QU8 GEMM microkernel (1 row x 4 columns) for WebAssembly with int32
// accumulation and fp32 "fmagic" requantization; the clamp uses the native
// __builtin_wasm_min/max_f32 operations.
void xnn_qu8_gemm_minmax_fp32_ukernel_1x4__wasm_fmagic(
    size_t mr,
    size_t nc,
    size_t kc,
    const uint8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    uint8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);

  const uint8_t* a_row = a;
  uint8_t* c_row = c;

  // Zero point of the packed kernel, subtracted from every weight byte.
  const int32_t kernel_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
  do {
    // Packed weights begin with four int32 biases for this column quartet.
    int32_t acc_n0 = ((const int32_t*) w)[0];
    int32_t acc_n1 = ((const int32_t*) w)[1];
    int32_t acc_n2 = ((const int32_t*) w)[2];
    int32_t acc_n3 = ((const int32_t*) w)[3];
    w = (const int32_t*) w + 4;

    // Dot product over the K dimension, one activation byte per iteration.
    size_t remaining = kc;
    do {
      const int32_t activation = (int32_t) (uint32_t) *a_row++;
      const int32_t weight_n0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - kernel_zero_point;
      const int32_t weight_n1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - kernel_zero_point;
      const int32_t weight_n2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - kernel_zero_point;
      const int32_t weight_n3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - kernel_zero_point;
      w = (const uint8_t*) w + 4;

      acc_n0 += activation * weight_n0;
      acc_n1 += activation * weight_n1;
      acc_n2 += activation * weight_n2;
      acc_n3 += activation * weight_n3;

      remaining -= sizeof(uint8_t);
    } while (remaining != 0);

    // Requantization: rescale in single precision.
    float facc_n0 = (float) acc_n0;
    float facc_n1 = (float) acc_n1;
    float facc_n2 = (float) acc_n2;
    float facc_n3 = (float) acc_n3;
    const float scale = params->fp32_scalar_fmagic.scale;
    facc_n0 *= scale;
    facc_n1 *= scale;
    facc_n2 *= scale;
    facc_n3 *= scale;

    // Clamp to the output range (limits are pre-shifted by the output zero point).
    const float output_min = params->fp32_scalar_fmagic.output_min_less_zero_point;
    facc_n0 = __builtin_wasm_max_f32(facc_n0, output_min);
    facc_n1 = __builtin_wasm_max_f32(facc_n1, output_min);
    facc_n2 = __builtin_wasm_max_f32(facc_n2, output_min);
    facc_n3 = __builtin_wasm_max_f32(facc_n3, output_min);
    const float output_max = params->fp32_scalar_fmagic.output_max_less_zero_point;
    facc_n0 = __builtin_wasm_min_f32(facc_n0, output_max);
    facc_n1 = __builtin_wasm_min_f32(facc_n1, output_max);
    facc_n2 = __builtin_wasm_min_f32(facc_n2, output_max);
    facc_n3 = __builtin_wasm_min_f32(facc_n3, output_max);

    // "Float magic" rounding: after adding the magic bias the rounded integer
    // sits in the low bits of the float representation; reinterpret and
    // subtract the pre-combined constant (which also restores the zero point).
    const float magic_bias = params->fp32_scalar_fmagic.magic_bias;
    facc_n0 += magic_bias;
    facc_n1 += magic_bias;
    facc_n2 += magic_bias;
    facc_n3 += magic_bias;
    const int32_t magic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
    int32_t out_n0 = (int32_t) float_as_uint32(facc_n0) - magic_bias_less_output_zero_point;
    int32_t out_n1 = (int32_t) float_as_uint32(facc_n1) - magic_bias_less_output_zero_point;
    int32_t out_n2 = (int32_t) float_as_uint32(facc_n2) - magic_bias_less_output_zero_point;
    int32_t out_n3 = (int32_t) float_as_uint32(facc_n3) - magic_bias_less_output_zero_point;

    if XNN_LIKELY(nc >= 4) {
      c_row[0] = (uint8_t) out_n0;
      c_row[1] = (uint8_t) out_n1;
      c_row[2] = (uint8_t) out_n2;
      c_row[3] = (uint8_t) out_n3;

      // Rewind A to the row start; advance C to the next column group.
      a_row = (const uint8_t*) ((uintptr_t) a_row - kc);
      c_row = (uint8_t*) ((uintptr_t) c_row + cn_stride);

      nc -= 4;
    } else {
      // Tail: store pairs then a single value, sliding out_n2 into slot 0.
      if (nc & 2) {
        c_row[0] = (uint8_t) out_n0;
        c_row[1] = (uint8_t) out_n1;
        out_n0 = out_n2;
        c_row += 2;
      }
      if (nc & 1) {
        c_row[0] = (uint8_t) out_n0;
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 4,144
| 32.699187
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gemm/gen/qu8-gemm-1x4-minmax-rndnu-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
// QU8 GEMM microkernel for a 1x4 output tile, portable scalar variant.
// Accumulates in int32 and requantizes with the "rndnu" scheme: widening
// multiply to int64, add a rounding term, arithmetic shift right, clamp,
// then add the output zero point.
// Weight layout (per 4-column group): 4 x int32 bias, then kc x 4 uint8 weights.
void xnn_qu8_gemm_minmax_rndnu_ukernel_1x4__scalar(
    size_t mr,
    size_t nc,
    size_t kc,
    const uint8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    uint8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);

  const uint8_t* a0 = a;
  uint8_t* c0 = c;

  const int32_t vb_zero_point = params->rndnu_scalar.kernel_zero_point;
  do {
    // Seed the 4 column accumulators from the packed int32 bias.
    int32_t acc[4];
    for (size_t j = 0; j < 4; j++) {
      acc[j] = ((const int32_t*) w)[j];
    }
    w = (const int32_t*) w + 4;

    // Multiply-accumulate over the full K dimension, one A byte at a time.
    // Weights are offset by the kernel zero point before the multiply.
    size_t k = kc;
    do {
      const int32_t a_value = (int32_t) (uint32_t) *a0++;
      for (size_t j = 0; j < 4; j++) {
        const int32_t b_value = (int32_t) (uint32_t) ((const uint8_t*) w)[j] - vb_zero_point;
        acc[j] += a_value * b_value;
      }
      w = (const uint8_t*) w + 4;
      k -= sizeof(uint8_t);
    } while (k != 0);

    // Requantize each accumulator: fixed-point multiply with rounding shift,
    // clamp to the (zero-point-adjusted) output range, re-add the zero point.
    const int32_t vmultiplier = params->rndnu_scalar.multiplier;
    const int64_t vrounding = params->rndnu_scalar.rounding;
    const uint32_t vshift = params->rndnu_scalar.shift;
    const int32_t voutput_min_less_zero_point = params->rndnu_scalar.output_min_less_zero_point;
    const int32_t voutput_max_less_zero_point = params->rndnu_scalar.output_max_less_zero_point;
    const int32_t voutput_zero_point = params->rndnu_scalar.output_zero_point;
    int32_t out[4];
    for (size_t j = 0; j < 4; j++) {
      const int64_t ext = math_mulext_s32(acc[j], vmultiplier) + vrounding;
      int32_t v = (int32_t) math_asr_s64(ext, vshift);
      v = math_max_s32(v, voutput_min_less_zero_point);
      v = math_min_s32(v, voutput_max_less_zero_point);
      out[j] = v + voutput_zero_point;
    }

    if XNN_LIKELY(nc >= 4) {
      // Full 4-column tile: store and advance to the next column group.
      for (size_t j = 0; j < 4; j++) {
        c0[j] = (uint8_t) out[j];
      }
      a0 = (const uint8_t*) ((uintptr_t) a0 - kc);  // rewind A for next tile
      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
      nc -= 4;
    } else {
      // Partial tile: write 2 bytes, then possibly 1 more.
      if (nc & 2) {
        c0[0] = (uint8_t) out[0];
        c0[1] = (uint8_t) out[1];
        out[0] = out[2];
        c0 += 2;
      }
      if (nc & 1) {
        c0[0] = (uint8_t) out[0];
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 3,997
| 32.596639
| 96
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gemm/gen/qu8-gemm-1x4c2-minmax-fp32-avx-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QU8 GEMM microkernel producing a 1x4 output tile (AVX, 128-bit weight loads).
// "c2" packing: weights are laid out 2 K-elements per column so _mm_madd_epi16
// can multiply-accumulate int16 pairs directly into int32 lanes.
// Requantization is done in fp32 (scale + clamp), then packed back to uint8.
// Marked XNN_OOB_READS: may read past the nominal end of inputs — presumably
// relies on padded buffers; confirm against the packing code.
void xnn_qu8_gemm_minmax_fp32_ukernel_1x4c2__avx_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    const uint8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    uint8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(uint8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // Packed data is padded so K can be treated as a multiple of 2.
  kc = round_up_po2(kc, 2 * sizeof(uint8_t));
  const uint8_t* a0 = a;
  uint8_t* c0 = c;
  do {
    // Initialize the 4 accumulators from the packed int32 bias values.
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    w = (const void*) ((const int32_t*) w + 4);
    size_t k = kc;
    const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
    const __m128i vzero = _mm_setzero_si128();
    // Main loop: 8 K elements per iteration via two 16-byte weight loads,
    // each split into lo/hi halves and zero-point-adjusted in int16.
    while (k >= 8 * sizeof(uint8_t)) {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
      a0 += 8;
      const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
      const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb01, vzero), vb_zero_point);
      const __m128i vxb1 = _mm_sub_epi16(_mm_unpackhi_epi8(vb01, vzero), vb_zero_point);
      // Broadcast each K-pair of A across the vector and madd against weights.
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
      const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const uint8_t*) w + 16));
      const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb23, vzero), vb_zero_point);
      const __m128i vxb3 = _mm_sub_epi16(_mm_unpackhi_epi8(vb23, vzero), vb_zero_point);
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
      w = (const void*) ((const uint8_t*) w + 32);
      k -= 8 * sizeof(uint8_t);
    }
    // Remainder: 2, 4 or 6 K elements (kc was rounded up to even above).
    if (k != 0) {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
      a0 = (const uint8_t*) ((uintptr_t) a0 + k);
      const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
      const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
      w = (const void*) ((const uint8_t*) w + 8);
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      if (k > 2 * sizeof(uint8_t)) {
        const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
        const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
        w = (const void*) ((const uint8_t*) w + 8);
        vacc0x0123 = _mm_add_epi32(vacc0x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
        if (k > 4 * sizeof(uint8_t)) {
          const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
          const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
          w = (const void*) ((const uint8_t*) w + 8);
          vacc0x0123 = _mm_add_epi32(vacc0x0123,
            _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
        }
      }
    }
    // Requantize: int32 -> fp32, scale, clamp max, convert back, add output
    // zero point with saturation, pack to uint8, then clamp min in uint8.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
    __m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
    __m128i vout = _mm_packus_epi16(vacc00x0123, vacc00x0123);
    vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
    if (nc >= 4) {
      // Full 4-column tile: one 32-bit store, then advance to the next tile.
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
      a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
      nc -= 4;
    } else {
      // Partial tile: write 2 bytes, shift, then possibly 1 more.
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c0 = (uint8_t) _mm_extract_epi8(vout, 0);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 5,150
| 33.57047
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gemm/gen/qu8-gemm-1x4c2-minmax-fp32-avx-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QU8 GEMM microkernel producing a 1x4 output tile (AVX, 64-bit weight loads).
// Same algorithm as the ld128 variant, but loads weights 8 bytes at a time
// and widens with _mm_cvtepu8_epi16 (SSE4.1 zero-extension) instead of unpack.
// Marked XNN_OOB_READS: may read past the nominal end of inputs — presumably
// relies on padded buffers; confirm against the packing code.
void xnn_qu8_gemm_minmax_fp32_ukernel_1x4c2__avx_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const uint8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    uint8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(uint8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // Packed data is padded so K can be treated as a multiple of 2.
  kc = round_up_po2(kc, 2 * sizeof(uint8_t));
  const uint8_t* a0 = a;
  uint8_t* c0 = c;
  do {
    // Initialize the 4 accumulators from the packed int32 bias values.
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    w = (const void*) ((const int32_t*) w + 4);
    size_t k = kc;
    const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
    // Main loop: 8 K elements per iteration via four 8-byte weight loads.
    while (k >= 8 * sizeof(uint8_t)) {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
      a0 += 8;
      // Broadcast each K-pair of A across the vector and madd against weights.
      const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
      const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 8));
      const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
      const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 16));
      const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
      const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 24));
      const __m128i vxb3 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb3), vb_zero_point);
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
      w = (const void*) ((const uint8_t*) w + 32);
      k -= 8 * sizeof(uint8_t);
    }
    // Remainder: 2, 4 or 6 K elements (kc was rounded up to even above).
    if (k != 0) {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
      a0 = (const uint8_t*) ((uintptr_t) a0 + k);
      const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
      const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
      w = (const void*) ((const uint8_t*) w + 8);
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      if (k > 2 * sizeof(uint8_t)) {
        const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
        const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
        w = (const void*) ((const uint8_t*) w + 8);
        vacc0x0123 = _mm_add_epi32(vacc0x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
        if (k > 4 * sizeof(uint8_t)) {
          const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
          const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
          w = (const void*) ((const uint8_t*) w + 8);
          vacc0x0123 = _mm_add_epi32(vacc0x0123,
            _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
        }
      }
    }
    // Requantize: int32 -> fp32, scale, clamp max, convert back, add output
    // zero point with saturation, pack to uint8, then clamp min in uint8.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
    __m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
    __m128i vout = _mm_packus_epi16(vacc00x0123, vacc00x0123);
    vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
    if (nc >= 4) {
      // Full 4-column tile: one 32-bit store, then advance to the next tile.
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
      a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
      nc -= 4;
    } else {
      // Partial tile: write 2 bytes, shift, then possibly 1 more.
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c0 = (uint8_t) _mm_extract_epi8(vout, 0);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 5,241
| 33.946667
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gemm/gen/qu8-gemm-1x4c2-minmax-fp32-sse2-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QU8 GEMM microkernel producing a 1x4 output tile (SSE2, 128-bit weight loads).
// SSE2 lacks _mm_cvtepu8_epi16 and _mm_extract_epi8, so bytes are widened with
// _mm_unpack*_epi8 against zero and the single-byte tail uses _mm_cvtsi128_si32.
// Marked XNN_OOB_READS: may read past the nominal end of inputs — presumably
// relies on padded buffers; confirm against the packing code.
void xnn_qu8_gemm_minmax_fp32_ukernel_1x4c2__sse2_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    const uint8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    uint8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(uint8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // Packed data is padded so K can be treated as a multiple of 2.
  kc = round_up_po2(kc, 2 * sizeof(uint8_t));
  const uint8_t* a0 = a;
  uint8_t* c0 = c;
  do {
    // Initialize the 4 accumulators from the packed int32 bias values.
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    w = (const void*) ((const int32_t*) w + 4);
    size_t k = kc;
    const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
    const __m128i vzero = _mm_setzero_si128();
    // Main loop: 8 K elements per iteration via two 16-byte weight loads,
    // each split into lo/hi halves and zero-point-adjusted in int16.
    while (k >= 8 * sizeof(uint8_t)) {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      const __m128i vxa0 = _mm_unpacklo_epi8(va0, vzero);
      a0 += 8;
      const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
      const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb01, vzero), vb_zero_point);
      const __m128i vxb1 = _mm_sub_epi16(_mm_unpackhi_epi8(vb01, vzero), vb_zero_point);
      // Broadcast each K-pair of A across the vector and madd against weights.
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
      const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const uint8_t*) w + 16));
      const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb23, vzero), vb_zero_point);
      const __m128i vxb3 = _mm_sub_epi16(_mm_unpackhi_epi8(vb23, vzero), vb_zero_point);
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
      w = (const void*) ((const uint8_t*) w + 32);
      k -= 8 * sizeof(uint8_t);
    }
    // Remainder: 2, 4 or 6 K elements (kc was rounded up to even above).
    if (k != 0) {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      const __m128i vxa0 = _mm_unpacklo_epi8(va0, vzero);
      a0 = (const uint8_t*) ((uintptr_t) a0 + k);
      const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
      const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb0, vzero), vb_zero_point);
      w = (const void*) ((const uint8_t*) w + 8);
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      if (k > 2 * sizeof(uint8_t)) {
        const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
        const __m128i vxb1 = _mm_sub_epi16(_mm_unpacklo_epi8(vb1, vzero), vb_zero_point);
        w = (const void*) ((const uint8_t*) w + 8);
        vacc0x0123 = _mm_add_epi32(vacc0x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
        if (k > 4 * sizeof(uint8_t)) {
          const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
          const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb2, vzero), vb_zero_point);
          w = (const void*) ((const uint8_t*) w + 8);
          vacc0x0123 = _mm_add_epi32(vacc0x0123,
            _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
        }
      }
    }
    // Requantize: int32 -> fp32, scale, clamp max, convert back, add output
    // zero point with saturation, pack to uint8, then clamp min in uint8.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
    __m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
    __m128i vout = _mm_packus_epi16(vacc00x0123, vacc00x0123);
    vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
    if (nc >= 4) {
      // Full 4-column tile: one 32-bit store, then advance to the next tile.
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
      a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
      nc -= 4;
    } else {
      // Partial tile: write 2 bytes, shift, then possibly 1 more.
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c0 = (uint8_t) _mm_cvtsi128_si32(vout);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 5,184
| 33.798658
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gemm/gen/qu8-gemm-1x4c2-minmax-fp32-sse2-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QU8 GEMM microkernel producing a 1x4 output tile (SSE2, 64-bit weight loads).
// Loads weights 8 bytes at a time and widens with _mm_unpacklo_epi8 against
// zero (SSE2 has no _mm_cvtepu8_epi16). The single-byte tail store uses
// _mm_cvtsi128_si32 since SSE2 lacks _mm_extract_epi8.
// Marked XNN_OOB_READS: may read past the nominal end of inputs — presumably
// relies on padded buffers; confirm against the packing code.
void xnn_qu8_gemm_minmax_fp32_ukernel_1x4c2__sse2_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const uint8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    uint8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(uint8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // Packed data is padded so K can be treated as a multiple of 2.
  kc = round_up_po2(kc, 2 * sizeof(uint8_t));
  const uint8_t* a0 = a;
  uint8_t* c0 = c;
  do {
    // Initialize the 4 accumulators from the packed int32 bias values.
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    w = (const void*) ((const int32_t*) w + 4);
    size_t k = kc;
    const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
    const __m128i vzero = _mm_setzero_si128();
    // Main loop: 8 K elements per iteration via four 8-byte weight loads.
    while (k >= 8 * sizeof(uint8_t)) {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      const __m128i vxa0 = _mm_unpacklo_epi8(va0, vzero);
      a0 += 8;
      // Broadcast each K-pair of A across the vector and madd against weights.
      const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
      const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb0, vzero), vb_zero_point);
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 8));
      const __m128i vxb1 = _mm_sub_epi16(_mm_unpacklo_epi8(vb1, vzero), vb_zero_point);
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
      const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 16));
      const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb2, vzero), vb_zero_point);
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
      const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 24));
      const __m128i vxb3 = _mm_sub_epi16(_mm_unpacklo_epi8(vb3, vzero), vb_zero_point);
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
      w = (const void*) ((const uint8_t*) w + 32);
      k -= 8 * sizeof(uint8_t);
    }
    // Remainder: 2, 4 or 6 K elements (kc was rounded up to even above).
    if (k != 0) {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      const __m128i vxa0 = _mm_unpacklo_epi8(va0, vzero);
      a0 = (const uint8_t*) ((uintptr_t) a0 + k);
      const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
      const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb0, vzero), vb_zero_point);
      w = (const void*) ((const uint8_t*) w + 8);
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      if (k > 2 * sizeof(uint8_t)) {
        const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
        const __m128i vxb1 = _mm_sub_epi16(_mm_unpacklo_epi8(vb1, vzero), vb_zero_point);
        w = (const void*) ((const uint8_t*) w + 8);
        vacc0x0123 = _mm_add_epi32(vacc0x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
        if (k > 4 * sizeof(uint8_t)) {
          const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
          const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb2, vzero), vb_zero_point);
          w = (const void*) ((const uint8_t*) w + 8);
          vacc0x0123 = _mm_add_epi32(vacc0x0123,
            _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
        }
      }
    }
    // Requantize: int32 -> fp32, scale, clamp max, convert back, add output
    // zero point with saturation, pack to uint8, then clamp min in uint8.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
    __m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
    __m128i vout = _mm_packus_epi16(vacc00x0123, vacc00x0123);
    vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
    if (nc >= 4) {
      // Full 4-column tile: one 32-bit store, then advance to the next tile.
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
      a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
      nc -= 4;
    } else {
      // Partial tile: write 2 bytes, shift, then possibly 1 more.
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c0 = (uint8_t) _mm_cvtsi128_si32(vout);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 5,350
| 34.437086
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gemm/gen/qu8-gemm-1x4c2-minmax-fp32-sse41-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QU8 GEMM microkernel producing a 1x4 output tile (SSE4.1, 128-bit weight loads).
// A bytes are widened with _mm_cvtepu8_epi16; the 16-byte weight loads are
// split into lo/hi halves with _mm_unpack*_epi8 against zero.
// Marked XNN_OOB_READS: may read past the nominal end of inputs — presumably
// relies on padded buffers; confirm against the packing code.
void xnn_qu8_gemm_minmax_fp32_ukernel_1x4c2__sse41_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    const uint8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    uint8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(uint8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // Packed data is padded so K can be treated as a multiple of 2.
  kc = round_up_po2(kc, 2 * sizeof(uint8_t));
  const uint8_t* a0 = a;
  uint8_t* c0 = c;
  do {
    // Initialize the 4 accumulators from the packed int32 bias values.
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    w = (const void*) ((const int32_t*) w + 4);
    size_t k = kc;
    const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
    const __m128i vzero = _mm_setzero_si128();
    // Main loop: 8 K elements per iteration via two 16-byte weight loads,
    // each split into lo/hi halves and zero-point-adjusted in int16.
    while (k >= 8 * sizeof(uint8_t)) {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
      a0 += 8;
      const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
      const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb01, vzero), vb_zero_point);
      const __m128i vxb1 = _mm_sub_epi16(_mm_unpackhi_epi8(vb01, vzero), vb_zero_point);
      // Broadcast each K-pair of A across the vector and madd against weights.
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
      const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const uint8_t*) w + 16));
      const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb23, vzero), vb_zero_point);
      const __m128i vxb3 = _mm_sub_epi16(_mm_unpackhi_epi8(vb23, vzero), vb_zero_point);
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
      w = (const void*) ((const uint8_t*) w + 32);
      k -= 8 * sizeof(uint8_t);
    }
    // Remainder: 2, 4 or 6 K elements (kc was rounded up to even above).
    if (k != 0) {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
      a0 = (const uint8_t*) ((uintptr_t) a0 + k);
      const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
      const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
      w = (const void*) ((const uint8_t*) w + 8);
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      if (k > 2 * sizeof(uint8_t)) {
        const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
        const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
        w = (const void*) ((const uint8_t*) w + 8);
        vacc0x0123 = _mm_add_epi32(vacc0x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
        if (k > 4 * sizeof(uint8_t)) {
          const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
          const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
          w = (const void*) ((const uint8_t*) w + 8);
          vacc0x0123 = _mm_add_epi32(vacc0x0123,
            _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
        }
      }
    }
    // Requantize: int32 -> fp32, scale, clamp max, convert back, add output
    // zero point with saturation, pack to uint8, then clamp min in uint8.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
    __m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
    __m128i vout = _mm_packus_epi16(vacc00x0123, vacc00x0123);
    vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
    if (nc >= 4) {
      // Full 4-column tile: one 32-bit store, then advance to the next tile.
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
      a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
      nc -= 4;
    } else {
      // Partial tile: write 2 bytes, shift, then possibly 1 more.
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c0 = (uint8_t) _mm_extract_epi8(vout, 0);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 5,152
| 33.583893
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gemm/gen/qu8-gemm-1x4c2-minmax-fp32-sse41-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QU8 GEMM microkernel producing a 1x4 output tile (SSE4.1, 64-bit weight loads).
// Loads weights 8 bytes at a time and widens both A and B bytes with
// _mm_cvtepu8_epi16 (SSE4.1 zero-extension).
// Marked XNN_OOB_READS: may read past the nominal end of inputs — presumably
// relies on padded buffers; confirm against the packing code.
void xnn_qu8_gemm_minmax_fp32_ukernel_1x4c2__sse41_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const uint8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    uint8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(uint8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // Packed data is padded so K can be treated as a multiple of 2.
  kc = round_up_po2(kc, 2 * sizeof(uint8_t));
  const uint8_t* a0 = a;
  uint8_t* c0 = c;
  do {
    // Initialize the 4 accumulators from the packed int32 bias values.
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    w = (const void*) ((const int32_t*) w + 4);
    size_t k = kc;
    const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
    // Main loop: 8 K elements per iteration via four 8-byte weight loads.
    while (k >= 8 * sizeof(uint8_t)) {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
      a0 += 8;
      // Broadcast each K-pair of A across the vector and madd against weights.
      const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
      const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 8));
      const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
      const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 16));
      const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
      const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 24));
      const __m128i vxb3 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb3), vb_zero_point);
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
      w = (const void*) ((const uint8_t*) w + 32);
      k -= 8 * sizeof(uint8_t);
    }
    // Remainder: 2, 4 or 6 K elements (kc was rounded up to even above).
    if (k != 0) {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
      a0 = (const uint8_t*) ((uintptr_t) a0 + k);
      const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
      const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
      w = (const void*) ((const uint8_t*) w + 8);
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
      if (k > 2 * sizeof(uint8_t)) {
        const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
        const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
        w = (const void*) ((const uint8_t*) w + 8);
        vacc0x0123 = _mm_add_epi32(vacc0x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
        if (k > 4 * sizeof(uint8_t)) {
          const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
          const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
          w = (const void*) ((const uint8_t*) w + 8);
          vacc0x0123 = _mm_add_epi32(vacc0x0123,
            _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
        }
      }
    }
    // Requantize: int32 -> fp32, scale, clamp max, convert back, add output
    // zero point with saturation, pack to uint8, then clamp min in uint8.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
    __m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
    __m128i vout = _mm_packus_epi16(vacc00x0123, vacc00x0123);
    vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
    if (nc >= 4) {
      // Full 4-column tile: one 32-bit store, then advance to the next tile.
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
      a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
      nc -= 4;
    } else {
      // Partial tile: write 2 bytes, shift, then possibly 1 more.
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c0 = (uint8_t) _mm_extract_epi8(vout, 0);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 5,243
| 33.96
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gemm/gen/qu8-gemm-1x4c2-minmax-fp32-wasmsimd-dot16x2-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2-wasmsimd-dot16x2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
// QU8 GEMM microkernel: 1 row (MR=1) x 4 columns (NR=4), K packed in pairs
// (c2), WAsm SIMD with i16x8 dot products and 128-bit weight loads (ld128).
// Accumulates in int32, then requantizes to uint8 in fp32 arithmetic using
// the magic-bias rounding trick.
//
//   mr, nc, kc  - rows (exactly 1, asserted), output columns, K bytes
//   a, a_stride - input row; a_stride is unused since mr == 1
//   w           - packed weights: per 4-column group, 4 int32 biases
//                 followed by the uint8 kernel bytes
//   c, cm_stride, cn_stride - output and strides; cm_stride unused (mr == 1)
//   params      - fp32_wasmsimd requantization constants
void xnn_qu8_gemm_minmax_fp32_ukernel_1x4c2__wasmsimd_dot16x2_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    const uint8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    uint8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(uint8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // Weights are packed 2 K-elements at a time, so round kc up to a pair.
  kc = round_up_po2(kc, 2 * sizeof(uint8_t));
  const uint8_t* a0 = a;
  uint8_t* c0 = c;
  do {
    // Accumulator starts at the 4 packed int32 bias values stored ahead of
    // the kernel bytes in w.
    v128_t vacc0x0123 = wasm_v128_load(w);
    w = (const void*) ((const int32_t*) w + 4);
    size_t k = kc;
    // Kernel zero point replicated across lanes; subtracted from each
    // widened weight so the dot products use zero-point-adjusted weights.
    const v128_t vb_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.kernel_zero_point);
    // Main loop: 8 K bytes (4 pairs) per iteration, weights read 16 bytes
    // at a time (ld128) and widened to int16 halves.
    while (k >= 8 * sizeof(uint8_t)) {
      const v128_t vxa0 = wasm_u16x8_load8x8((const v128_t*) a0);
      a0 += 8;
      const v128_t vb01 = wasm_v128_load(w);
      const v128_t vxb0 = wasm_i16x8_sub(wasm_u16x8_extend_low_u8x16(vb01), vb_zero_point);
      const v128_t vxb1 = wasm_i16x8_sub(wasm_u16x8_extend_high_u8x16(vb01), vb_zero_point);
      // Each dot consumes one broadcast 2-element group of A.
      vacc0x0123 = wasm_i32x4_add(vacc0x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0));
      vacc0x0123 = wasm_i32x4_add(vacc0x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1));
      const v128_t vb23 = wasm_v128_load((const uint8_t*) w + 16);
      const v128_t vxb2 = wasm_i16x8_sub(wasm_u16x8_extend_low_u8x16(vb23), vb_zero_point);
      const v128_t vxb3 = wasm_i16x8_sub(wasm_u16x8_extend_high_u8x16(vb23), vb_zero_point);
      vacc0x0123 = wasm_i32x4_add(vacc0x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2));
      vacc0x0123 = wasm_i32x4_add(vacc0x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 3, 3, 3, 3), vxb3));
      w = (const void*) ((const uint8_t*) w + 32);
      k -= 8 * sizeof(uint8_t);
    }
    // Remainder: after rounding, 2, 4, or 6 K bytes may remain; process one
    // pair per nested level.
    if (k != 0) {
      const v128_t vxa0 = wasm_u16x8_load8x8(a0);
      a0 = (const uint8_t*) ((uintptr_t) a0 + k);
      const v128_t vxb0 = wasm_i16x8_sub(wasm_u16x8_load8x8(w), vb_zero_point);
      w = (const void*) ((const uint8_t*) w + 8);
      vacc0x0123 = wasm_i32x4_add(vacc0x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0));
      if (k > 2 * sizeof(uint8_t)) {
        const v128_t vxb1 = wasm_i16x8_sub(wasm_u16x8_load8x8(w), vb_zero_point);
        w = (const void*) ((const uint8_t*) w + 8);
        vacc0x0123 = wasm_i32x4_add(vacc0x0123,
          wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1));
        if (k > 4 * sizeof(uint8_t)) {
          const v128_t vxb2 = wasm_i16x8_sub(wasm_u16x8_load8x8(w), vb_zero_point);
          w = (const void*) ((const uint8_t*) w + 8);
          vacc0x0123 = wasm_i32x4_add(vacc0x0123,
            wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2));
        }
      }
    }
    // Requantize: int32 -> float, scale, then add the magic bias so the
    // rounded integer appears in the low bits of the float representation.
    vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123);
    const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
    vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale);
    const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
    vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias);
    // Integer max against magic_min applies the lower output clamp; the
    // subtraction removes the magic bias and adds the output zero point.
    const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
    vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min);
    const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
    vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point);
    // Narrow int32 -> int16 -> uint8 and apply the upper output clamp.
    v128_t vacc00x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc0x0123);
    v128_t vout = wasm_u8x16_narrow_i16x8(vacc00x0123, vacc00x0123);
    const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max)
;
    vout = wasm_u8x16_min(vout, voutput_max);
    if (nc >= 4) {
      // Full 4-byte store; rewind a0 for the next column group.
      wasm_v128_store32_lane(c0, vout, 0);
      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
      a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
      nc -= 4;
    } else {
      // Tail: store 2 bytes and/or 1 byte of the final column group.
      if (nc & 2) {
        wasm_v128_store16_lane(c0, vout, 0);
        c0 += 2;
        vout = wasm_u32x4_shr(vout, 16);
      }
      if (nc & 1) {
        wasm_v128_store8_lane(c0, vout, 0);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 4,886
| 32.244898
| 134
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gemm/gen/qu8-gemm-1x4c2-minmax-fp32-wasmsimd-dot16x2-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2-wasmsimd-dot16x2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
// QU8 GEMM microkernel: 1 row x 4 columns, K packed in pairs (c2), WAsm
// SIMD with i16x8 dot products and 64-bit weight loads (ld64 variant: each
// 8-byte weight block is loaded and widened individually instead of in
// 16-byte pairs).  Accumulates in int32 and requantizes to uint8 in fp32
// with the magic-bias rounding trick.
//
//   mr, nc, kc  - rows (exactly 1, asserted), output columns, K bytes
//   a, a_stride - input row; a_stride is unused since mr == 1
//   w           - packed weights: 4 int32 biases, then uint8 kernel bytes
//   c, cm_stride, cn_stride - output and strides; cm_stride unused (mr == 1)
//   params      - fp32_wasmsimd requantization constants
void xnn_qu8_gemm_minmax_fp32_ukernel_1x4c2__wasmsimd_dot16x2_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const uint8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    uint8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(uint8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // Weights are packed 2 K-elements at a time, so round kc up to a pair.
  kc = round_up_po2(kc, 2 * sizeof(uint8_t));
  const uint8_t* a0 = a;
  uint8_t* c0 = c;
  do {
    // Accumulator starts at the 4 packed int32 bias values.
    v128_t vacc0x0123 = wasm_v128_load(w);
    w = (const void*) ((const int32_t*) w + 4);
    size_t k = kc;
    // Kernel zero point replicated; subtracted from each widened weight.
    const v128_t vb_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.kernel_zero_point);
    // Main loop: 8 K bytes (4 pairs) per iteration; each pair's weight
    // block is loaded as 8 bytes and widened to int16 on its own.
    while (k >= 8 * sizeof(uint8_t)) {
      const v128_t vxa0 = wasm_u16x8_load8x8((const v128_t*) a0);
      a0 += 8;
      const v128_t vxb0 = wasm_i16x8_sub(wasm_u16x8_load8x8(w), vb_zero_point);
      vacc0x0123 = wasm_i32x4_add(vacc0x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0));
      const v128_t vxb1 = wasm_i16x8_sub(wasm_u16x8_load8x8((const uint8_t*) w + 8), vb_zero_point);
      vacc0x0123 = wasm_i32x4_add(vacc0x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1));
      const v128_t vxb2 = wasm_i16x8_sub(wasm_u16x8_load8x8((const uint8_t*) w + 16), vb_zero_point);
      vacc0x0123 = wasm_i32x4_add(vacc0x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2));
      const v128_t vxb3 = wasm_i16x8_sub(wasm_u16x8_load8x8((const uint8_t*) w + 24), vb_zero_point);
      vacc0x0123 = wasm_i32x4_add(vacc0x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 3, 3, 3, 3), vxb3));
      w = (const void*) ((const uint8_t*) w + 32);
      k -= 8 * sizeof(uint8_t);
    }
    // Remainder: 2, 4, or 6 K bytes may remain; one pair per nested level.
    if (k != 0) {
      const v128_t vxa0 = wasm_u16x8_load8x8(a0);
      a0 = (const uint8_t*) ((uintptr_t) a0 + k);
      const v128_t vxb0 = wasm_i16x8_sub(wasm_u16x8_load8x8(w), vb_zero_point);
      w = (const void*) ((const uint8_t*) w + 8);
      vacc0x0123 = wasm_i32x4_add(vacc0x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0));
      if (k > 2 * sizeof(uint8_t)) {
        const v128_t vxb1 = wasm_i16x8_sub(wasm_u16x8_load8x8(w), vb_zero_point);
        w = (const void*) ((const uint8_t*) w + 8);
        vacc0x0123 = wasm_i32x4_add(vacc0x0123,
          wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1));
        if (k > 4 * sizeof(uint8_t)) {
          const v128_t vxb2 = wasm_i16x8_sub(wasm_u16x8_load8x8(w), vb_zero_point);
          w = (const void*) ((const uint8_t*) w + 8);
          vacc0x0123 = wasm_i32x4_add(vacc0x0123,
            wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2));
        }
      }
    }
    // Requantize: int32 -> float, scale, add magic bias; integer max
    // against magic_min applies the lower clamp, and subtracting
    // magic_bias_less_output_zero_point yields the zero-point-adjusted int.
    vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123);
    const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
    vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale);
    const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
    vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias);
    const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
    vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min);
    const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
    vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point);
    // Narrow to uint8 and apply the upper output clamp.
    v128_t vacc00x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc0x0123);
    v128_t vout = wasm_u8x16_narrow_i16x8(vacc00x0123, vacc00x0123);
    const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
    vout = wasm_u8x16_min(vout, voutput_max);
    if (nc >= 4) {
      // Full 4-byte store; rewind a0 for the next column group.
      wasm_v128_store32_lane(c0, vout, 0);
      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
      a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
      nc -= 4;
    } else {
      // Tail: store 2 bytes and/or 1 byte of the final column group.
      if (nc & 2) {
        wasm_v128_store16_lane(c0, vout, 0);
        c0 += 2;
        vout = wasm_u32x4_shr(vout, 16);
      }
      if (nc & 1) {
        wasm_v128_store8_lane(c0, vout, 0);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 4,788
| 32.027586
| 134
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gemm/gen/qu8-gemm-1x4c2-minmax-fp32-xop-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QU8 GEMM microkernel: 1 row x 4 columns, K packed in pairs (c2), AMD XOP
// with 128-bit weight loads.  _mm_maddd_epi16 fuses the 16-bit
// multiply-add-pairs with the 32-bit accumulate in a single instruction.
// Requantization is done in fp32 via cvt/scale/cvt with SSE-style clamps.
//
//   mr, nc, kc  - rows (exactly 1, asserted), output columns, K bytes
//   a, a_stride - input row; a_stride is unused since mr == 1
//   w           - packed weights: 4 int32 biases, then uint8 kernel bytes
//   c, cm_stride, cn_stride - output and strides; cm_stride unused (mr == 1)
//   params      - fp32_sse2 requantization constants
void xnn_qu8_gemm_minmax_fp32_ukernel_1x4c2__xop_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    const uint8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    uint8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(uint8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // Weights are packed 2 K-elements at a time, so round kc up to a pair.
  kc = round_up_po2(kc, 2 * sizeof(uint8_t));
  const uint8_t* a0 = a;
  uint8_t* c0 = c;
  do {
    // Accumulator starts at the 4 packed int32 bias values.
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    w = (const void*) ((const int32_t*) w + 4);
    size_t k = kc;
    // Kernel zero point, subtracted from each widened weight.
    const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
    const __m128i vzero = _mm_setzero_si128();
    // Main loop: 8 K bytes (4 pairs) per iteration; weights read 16 bytes
    // at a time and widened to int16 via unpack with zero.
    while (k >= 8 * sizeof(uint8_t)) {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
      a0 += 8;
      const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
      const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb01, vzero), vb_zero_point);
      const __m128i vxb1 = _mm_sub_epi16(_mm_unpackhi_epi8(vb01, vzero), vb_zero_point);
      // XOP maddd: (a*b pairs summed) + accumulator, one instruction each.
      vacc0x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc0x0123);
      vacc0x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc0x0123);
      const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const uint8_t*) w + 16));
      const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb23, vzero), vb_zero_point);
      const __m128i vxb3 = _mm_sub_epi16(_mm_unpackhi_epi8(vb23, vzero), vb_zero_point);
      vacc0x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc0x0123);
      vacc0x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc0x0123);
      w = (const void*) ((const uint8_t*) w + 32);
      k -= 8 * sizeof(uint8_t);
    }
    // Remainder: 2, 4, or 6 K bytes may remain; one pair per nested level.
    if (k != 0) {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
      a0 = (const uint8_t*) ((uintptr_t) a0 + k);
      const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
      const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
      w = (const void*) ((const uint8_t*) w + 8);
      vacc0x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc0x0123);
      if (k > 2 * sizeof(uint8_t)) {
        const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
        const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
        w = (const void*) ((const uint8_t*) w + 8);
        vacc0x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc0x0123);
        if (k > 4 * sizeof(uint8_t)) {
          const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
          const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
          w = (const void*) ((const uint8_t*) w + 8);
          vacc0x0123 = _mm_maddd_epi16(
            _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc0x0123);
        }
      }
    }
    // Requantize: int32 -> float, scale, clamp the top end in float space
    // (output_max_less_zero_point), then round back to int32.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    // Saturating add of the output zero point while narrowing to int16,
    // then pack to uint8 and apply the lower output clamp.
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
    __m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
    __m128i vout = _mm_packus_epi16(vacc00x0123, vacc00x0123);
    vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
    if (nc >= 4) {
      // Full 4-byte store; rewind a0 for the next column group.
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
      a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
      nc -= 4;
    } else {
      // Tail: store 2 bytes and/or 1 byte of the final column group.
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c0 = (uint8_t) _mm_extract_epi8(vout, 0);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 5,112
| 32.418301
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gemm/gen/qu8-gemm-1x4c2-minmax-fp32-xop-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QU8 GEMM microkernel: 1 row x 4 columns, K packed in pairs (c2), AMD XOP
// with 64-bit weight loads (each 8-byte weight block loaded and widened
// individually).  _mm_maddd_epi16 fuses the 16-bit multiply-add-pairs with
// the 32-bit accumulate.  Requantization is fp32 with SSE-style clamps.
//
//   mr, nc, kc  - rows (exactly 1, asserted), output columns, K bytes
//   a, a_stride - input row; a_stride is unused since mr == 1
//   w           - packed weights: 4 int32 biases, then uint8 kernel bytes
//   c, cm_stride, cn_stride - output and strides; cm_stride unused (mr == 1)
//   params      - fp32_sse2 requantization constants
void xnn_qu8_gemm_minmax_fp32_ukernel_1x4c2__xop_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const uint8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    uint8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(uint8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // Weights are packed 2 K-elements at a time, so round kc up to a pair.
  kc = round_up_po2(kc, 2 * sizeof(uint8_t));
  const uint8_t* a0 = a;
  uint8_t* c0 = c;
  do {
    // Accumulator starts at the 4 packed int32 bias values.
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    w = (const void*) ((const int32_t*) w + 4);
    size_t k = kc;
    // Kernel zero point, subtracted from each widened weight.
    const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
    // Main loop: 8 K bytes (4 pairs) per iteration; each pair's 8-byte
    // weight block is loaded and zero-extended to int16 on its own.
    while (k >= 8 * sizeof(uint8_t)) {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
      a0 += 8;
      const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
      const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
      // XOP maddd: (a*b pairs summed) + accumulator, one instruction each.
      vacc0x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc0x0123);
      const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 8));
      const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
      vacc0x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc0x0123);
      const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 16));
      const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
      vacc0x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc0x0123);
      const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 24));
      const __m128i vxb3 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb3), vb_zero_point);
      vacc0x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc0x0123);
      w = (const void*) ((const uint8_t*) w + 32);
      k -= 8 * sizeof(uint8_t);
    }
    // Remainder: 2, 4, or 6 K bytes may remain; one pair per nested level.
    if (k != 0) {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
      a0 = (const uint8_t*) ((uintptr_t) a0 + k);
      const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
      const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
      w = (const void*) ((const uint8_t*) w + 8);
      vacc0x0123 = _mm_maddd_epi16(
        _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc0x0123);
      if (k > 2 * sizeof(uint8_t)) {
        const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
        const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
        w = (const void*) ((const uint8_t*) w + 8);
        vacc0x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc0x0123);
        if (k > 4 * sizeof(uint8_t)) {
          const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
          const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
          w = (const void*) ((const uint8_t*) w + 8);
          vacc0x0123 = _mm_maddd_epi16(
            _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc0x0123);
        }
      }
    }
    // Requantize: int32 -> float, scale, clamp the top end in float space,
    // round back to int32 (cvtps rounds to nearest).
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    // Saturating zero-point add while narrowing, pack to uint8, lower clamp.
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
    __m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
    __m128i vout = _mm_packus_epi16(vacc00x0123, vacc00x0123);
    vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
    if (nc >= 4) {
      // Full 4-byte store; rewind a0 for the next column group.
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
      a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
      nc -= 4;
    } else {
      // Tail: store 2 bytes and/or 1 byte of the final column group.
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c0 = (uint8_t) _mm_extract_epi8(vout, 0);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 5,203
| 32.792208
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gemm/gen/qu8-gemm-1x4c2s4-minmax-fp32-avx-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QU8 GEMM microkernel: 1 row x 4 columns, c2s4 packing (K in pairs with a
// 4-byte shift layout), AVX(SSE4.1 intrinsics) with 128-bit weight loads.
// Unlike the plain c2 kernels, the A register is rotated one 32-bit lane
// between successive weight blocks instead of broadcasting each pair, so
// kc is rounded up to 8 and there is no remainder path.
//
//   mr, nc, kc  - rows (exactly 1, asserted), output columns, K bytes
//   a, a_stride - input row; a_stride is unused since mr == 1
//   w           - packed weights: 4 int32 biases, then uint8 kernel bytes
//   c, cm_stride, cn_stride - output and strides; cm_stride unused (mr == 1)
//   params      - fp32_sse2 requantization constants
void xnn_qu8_gemm_minmax_fp32_ukernel_1x4c2s4__avx_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    const uint8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    uint8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(uint8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // s4 layout consumes 8 K bytes per loop iteration; round kc up to 8.
  kc = round_up_po2(kc, 8 * sizeof(uint8_t));
  const uint8_t* a0 = a;
  uint8_t* c0 = c;
  do {
    // Accumulator starts at the 4 packed int32 bias values.
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    w = (const void*) ((const int32_t*) w + 4);
    size_t k = kc;
    // Kernel zero point, subtracted from each widened weight.
    const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
    const __m128i vzero = _mm_setzero_si128();
    // Main loop: 8 K bytes per iteration; vxa0 is rotated one 32-bit lane
    // (shuffle 0,3,2,1) between the 4 weight blocks (the "s4" shift).
    do {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      __m128i vxa0 = _mm_cvtepu8_epi16(va0);
      a0 += 8;
      const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
      const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb01, vzero), vb_zero_point);
      const __m128i vxb1 = _mm_sub_epi16(_mm_unpackhi_epi8(vb01, vzero), vb_zero_point);
      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const uint8_t*) w + 16));
      const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb23, vzero), vb_zero_point);
      const __m128i vxb3 = _mm_sub_epi16(_mm_unpackhi_epi8(vb23, vzero), vb_zero_point);
      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
      w = (const void*) ((const uint8_t*) w + 32);
      k -= 8 * sizeof(uint8_t);
    } while (k != 0);
    // Requantize: int32 -> float, scale, clamp the top end in float space,
    // round back to int32 (cvtps rounds to nearest).
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    // Saturating zero-point add while narrowing, pack to uint8, lower clamp.
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
    __m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
    __m128i vout = _mm_packus_epi16(vacc00x0123, vacc00x0123);
    vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
    if (nc >= 4) {
      // Full 4-byte store; rewind a0 for the next column group.
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
      a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
      nc -= 4;
    } else {
      // Tail: store 2 bytes and/or 1 byte of the final column group.
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c0 = (uint8_t) _mm_extract_epi8(vout, 0);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 3,836
| 31.516949
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gemm/gen/qu8-gemm-1x4c2s4-minmax-fp32-avx-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QU8 GEMM microkernel: 1 row x 4 columns, c2s4 packing (K in pairs with a
// 4-byte shift layout), AVX(SSE4.1 intrinsics) with 64-bit weight loads
// (each 8-byte weight block loaded and widened individually).  The A
// register is rotated one 32-bit lane between weight blocks, so kc is
// rounded up to 8 and there is no remainder path.
//
//   mr, nc, kc  - rows (exactly 1, asserted), output columns, K bytes
//   a, a_stride - input row; a_stride is unused since mr == 1
//   w           - packed weights: 4 int32 biases, then uint8 kernel bytes
//   c, cm_stride, cn_stride - output and strides; cm_stride unused (mr == 1)
//   params      - fp32_sse2 requantization constants
void xnn_qu8_gemm_minmax_fp32_ukernel_1x4c2s4__avx_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const uint8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    uint8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(uint8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // s4 layout consumes 8 K bytes per loop iteration; round kc up to 8.
  kc = round_up_po2(kc, 8 * sizeof(uint8_t));
  const uint8_t* a0 = a;
  uint8_t* c0 = c;
  do {
    // Accumulator starts at the 4 packed int32 bias values.
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    w = (const void*) ((const int32_t*) w + 4);
    size_t k = kc;
    // Kernel zero point, subtracted from each widened weight.
    const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
    // Main loop: 8 K bytes per iteration; vxa0 is rotated one 32-bit lane
    // (shuffle 0,3,2,1) between the 4 weight blocks (the "s4" shift).
    do {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      __m128i vxa0 = _mm_cvtepu8_epi16(va0);
      a0 += 8;
      const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
      const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 8));
      const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 16));
      const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 24));
      const __m128i vxb3 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb3), vb_zero_point);
      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
      w = (const void*) ((const uint8_t*) w + 32);
      k -= 8 * sizeof(uint8_t);
    } while (k != 0);
    // Requantize: int32 -> float, scale, clamp the top end in float space,
    // round back to int32 (cvtps rounds to nearest).
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    // Saturating zero-point add while narrowing, pack to uint8, lower clamp.
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
    __m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
    __m128i vout = _mm_packus_epi16(vacc00x0123, vacc00x0123);
    vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
    if (nc >= 4) {
      // Full 4-byte store; rewind a0 for the next column group.
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
      a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
      nc -= 4;
    } else {
      // Tail: store 2 bytes and/or 1 byte of the final column group.
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c0 = (uint8_t) _mm_extract_epi8(vout, 0);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 3,927
| 32.008403
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gemm/gen/qu8-gemm-1x4c2s4-minmax-fp32-sse2-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QU8 GEMM microkernel: 1 row x 4 columns, c2s4 packing (K in pairs with a
// 4-byte shift layout), SSE2-only with 128-bit weight loads.  Since SSE2
// lacks _mm_cvtepu8_epi16, bytes are widened via unpacklo/unpackhi with a
// zero register; the single-byte store tail likewise uses
// _mm_cvtsi128_si32 instead of the SSE4.1 _mm_extract_epi8.
//
//   mr, nc, kc  - rows (exactly 1, asserted), output columns, K bytes
//   a, a_stride - input row; a_stride is unused since mr == 1
//   w           - packed weights: 4 int32 biases, then uint8 kernel bytes
//   c, cm_stride, cn_stride - output and strides; cm_stride unused (mr == 1)
//   params      - fp32_sse2 requantization constants
void xnn_qu8_gemm_minmax_fp32_ukernel_1x4c2s4__sse2_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    const uint8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    uint8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(uint8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // s4 layout consumes 8 K bytes per loop iteration; round kc up to 8.
  kc = round_up_po2(kc, 8 * sizeof(uint8_t));
  const uint8_t* a0 = a;
  uint8_t* c0 = c;
  do {
    // Accumulator starts at the 4 packed int32 bias values.
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    w = (const void*) ((const int32_t*) w + 4);
    size_t k = kc;
    // Kernel zero point, subtracted from each widened weight.
    const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
    const __m128i vzero = _mm_setzero_si128();
    // Main loop: 8 K bytes per iteration; vxa0 is rotated one 32-bit lane
    // (shuffle 0,3,2,1) between the 4 weight blocks (the "s4" shift).
    do {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      __m128i vxa0 = _mm_unpacklo_epi8(va0, vzero);
      a0 += 8;
      const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
      const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb01, vzero), vb_zero_point);
      const __m128i vxb1 = _mm_sub_epi16(_mm_unpackhi_epi8(vb01, vzero), vb_zero_point);
      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const uint8_t*) w + 16));
      const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb23, vzero), vb_zero_point);
      const __m128i vxb3 = _mm_sub_epi16(_mm_unpackhi_epi8(vb23, vzero), vb_zero_point);
      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
      w = (const void*) ((const uint8_t*) w + 32);
      k -= 8 * sizeof(uint8_t);
    } while (k != 0);
    // Requantize: int32 -> float, scale, clamp the top end in float space,
    // round back to int32 (cvtps rounds to nearest).
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    // Saturating zero-point add while narrowing, pack to uint8, lower clamp.
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
    __m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
    __m128i vout = _mm_packus_epi16(vacc00x0123, vacc00x0123);
    vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
    if (nc >= 4) {
      // Full 4-byte store; rewind a0 for the next column group.
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
      a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
      nc -= 4;
    } else {
      // Tail: store 2 bytes and/or 1 byte of the final column group.
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        // SSE2 has no _mm_extract_epi8; take the low 32 bits and truncate.
        *c0 = (uint8_t) _mm_cvtsi128_si32(vout);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 3,842
| 31.567797
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gemm/gen/qu8-gemm-1x4c2s4-minmax-fp32-sse2-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QU8 GEMM microkernel (min/max clamping, fp32 requantization), SSE2 variant
// with 64-bit ("ld64") weight loads: computes a 1x4 tile of C per outer
// iteration (mr == 1 row, up to 4 columns), reducing K in groups of 8 bytes.
// "c2s4" = weights packed as pairs of K elements, with the activation vector
// rotated by one 32-bit lane ("shuffle") between each of 4 multiply-add steps.
void xnn_qu8_gemm_minmax_fp32_ukernel_1x4c2s4__sse2_ld64(
    size_t mr,                   // number of A/C rows; this kernel handles exactly 1
    size_t nc,                   // number of C columns remaining
    size_t kc,                   // reduction (K) length in bytes
    const uint8_t* restrict a,   // input activations
    size_t a_stride,             // row stride of A (unused here since mr == 1)
    const void* restrict w,      // packed weights: 4x int32 bias, then 32-byte blocks per 8 K steps
    uint8_t* restrict c,         // output
    size_t cm_stride,            // row stride of C (unused here since mr == 1)
    size_t cn_stride,            // byte stride between successive 4-column groups of C
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(uint8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // K is consumed 8 bytes at a time; round up so the inner loop has no remainder.
  // (Presumably A/W are packed with matching padding — the XNN_OOB_READS
  // annotation suggests reads past the logical end are tolerated; confirm.)
  kc = round_up_po2(kc, 8 * sizeof(uint8_t));
  const uint8_t* a0 = a;
  uint8_t* c0 = c;
  do {
    // Initialize the 4 int32 accumulators from the packed per-column biases.
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    w = (const void*) ((const int32_t*) w + 4);
    size_t k = kc;
    const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
    const __m128i vzero = _mm_setzero_si128();
    do {
      // Load 8 activation bytes and zero-extend to 8x uint16 (SSE2 unpack-with-zero).
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      __m128i vxa0 = _mm_unpacklo_epi8(va0, vzero);
      a0 += 8;
      // 4 steps: load 8 weight bytes, widen, subtract the kernel zero point,
      // then _mm_madd_epi16 multiplies 16-bit pairs and accumulates into 4
      // int32 lanes; rotate the activations one 32-bit lane before each
      // subsequent step so every K pair meets its weight pair.
      const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
      const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb0, vzero), vb_zero_point);
      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 8));
      const __m128i vxb1 = _mm_sub_epi16(_mm_unpacklo_epi8(vb1, vzero), vb_zero_point);
      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 16));
      const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb2, vzero), vb_zero_point);
      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 24));
      const __m128i vxb3 = _mm_sub_epi16(_mm_unpacklo_epi8(vb3, vzero), vb_zero_point);
      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
      w = (const void*) ((const uint8_t*) w + 32);
      k -= 8 * sizeof(uint8_t);
    } while (k != 0);
    // fp32 requantization: scale in float, clamp to (output_max - zero_point),
    // round-convert back to int32.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    // Add the output zero point with 16-bit saturation, pack to unsigned 8-bit,
    // then apply the lower clamp.
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
    __m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
    __m128i vout = _mm_packus_epi16(vacc00x0123, vacc00x0123);
    vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
    if (nc >= 4) {
      // Full 4-column store; rewind A for the next column group.
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
      a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
      nc -= 4;
    } else {
      // Tail: store the remaining 1-3 bytes, shifting consumed lanes out.
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c0 = (uint8_t) _mm_cvtsi128_si32(vout);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 4,008
| 32.408333
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gemm/gen/qu8-gemm-1x4c2s4-minmax-fp32-sse41-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QU8 GEMM microkernel (min/max clamping, fp32 requantization), SSE4.1 variant
// with 128-bit ("ld128") weight loads: each 16-byte load provides two 8-byte
// weight groups (split via unpacklo/unpackhi). Computes a 1x4 C tile per outer
// iteration; K is reduced 8 bytes at a time with the "c2s4" rotate-and-madd
// scheme (activation vector rotated one 32-bit lane between the 4 madd steps).
void xnn_qu8_gemm_minmax_fp32_ukernel_1x4c2s4__sse41_ld128(
    size_t mr,                   // number of A/C rows; this kernel handles exactly 1
    size_t nc,                   // number of C columns remaining
    size_t kc,                   // reduction (K) length in bytes
    const uint8_t* restrict a,   // input activations
    size_t a_stride,             // row stride of A (unused here since mr == 1)
    const void* restrict w,      // packed weights: 4x int32 bias, then 32-byte blocks per 8 K steps
    uint8_t* restrict c,         // output
    size_t cm_stride,            // row stride of C (unused here since mr == 1)
    size_t cn_stride,            // byte stride between successive 4-column groups of C
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(uint8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // K is consumed 8 bytes at a time; round up so the inner loop has no remainder.
  kc = round_up_po2(kc, 8 * sizeof(uint8_t));
  const uint8_t* a0 = a;
  uint8_t* c0 = c;
  do {
    // Initialize the 4 int32 accumulators from the packed per-column biases.
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    w = (const void*) ((const int32_t*) w + 4);
    size_t k = kc;
    const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
    const __m128i vzero = _mm_setzero_si128();
    do {
      // Load 8 activation bytes; SSE4.1 _mm_cvtepu8_epi16 zero-extends to uint16.
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      __m128i vxa0 = _mm_cvtepu8_epi16(va0);
      a0 += 8;
      // First 16-byte weight load covers madd steps 0 and 1 (lo/hi halves).
      const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
      const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb01, vzero), vb_zero_point);
      const __m128i vxb1 = _mm_sub_epi16(_mm_unpackhi_epi8(vb01, vzero), vb_zero_point);
      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      // Second 16-byte weight load covers madd steps 2 and 3.
      const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const uint8_t*) w + 16));
      const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb23, vzero), vb_zero_point);
      const __m128i vxb3 = _mm_sub_epi16(_mm_unpackhi_epi8(vb23, vzero), vb_zero_point);
      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
      w = (const void*) ((const uint8_t*) w + 32);
      k -= 8 * sizeof(uint8_t);
    } while (k != 0);
    // fp32 requantization: scale in float, clamp to (output_max - zero_point),
    // round-convert back to int32.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    // Add output zero point with saturation, pack to u8, apply the lower clamp.
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
    __m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
    __m128i vout = _mm_packus_epi16(vacc00x0123, vacc00x0123);
    vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
    if (nc >= 4) {
      // Full 4-column store; rewind A for the next column group.
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
      a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
      nc -= 4;
    } else {
      // Tail: store the remaining 1-3 bytes (SSE4.1 byte extract for the last one).
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c0 = (uint8_t) _mm_extract_epi8(vout, 0);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 3,838
| 31.533898
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gemm/gen/qu8-gemm-1x4c2s4-minmax-fp32-sse41-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QU8 GEMM microkernel (min/max clamping, fp32 requantization), SSE4.1 variant
// with 64-bit ("ld64") weight loads: 8 weight bytes per load, zero-extended via
// _mm_cvtepu8_epi16. Computes a 1x4 C tile per outer iteration; K is reduced
// 8 bytes at a time with the "c2s4" rotate-and-madd scheme.
void xnn_qu8_gemm_minmax_fp32_ukernel_1x4c2s4__sse41_ld64(
    size_t mr,                   // number of A/C rows; this kernel handles exactly 1
    size_t nc,                   // number of C columns remaining
    size_t kc,                   // reduction (K) length in bytes
    const uint8_t* restrict a,   // input activations
    size_t a_stride,             // row stride of A (unused here since mr == 1)
    const void* restrict w,      // packed weights: 4x int32 bias, then 32-byte blocks per 8 K steps
    uint8_t* restrict c,         // output
    size_t cm_stride,            // row stride of C (unused here since mr == 1)
    size_t cn_stride,            // byte stride between successive 4-column groups of C
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(uint8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // K is consumed 8 bytes at a time; round up so the inner loop has no remainder.
  kc = round_up_po2(kc, 8 * sizeof(uint8_t));
  const uint8_t* a0 = a;
  uint8_t* c0 = c;
  do {
    // Initialize the 4 int32 accumulators from the packed per-column biases.
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    w = (const void*) ((const int32_t*) w + 4);
    size_t k = kc;
    const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
    do {
      // Load 8 activation bytes and zero-extend to 8x uint16.
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      __m128i vxa0 = _mm_cvtepu8_epi16(va0);
      a0 += 8;
      // 4 steps: load 8 weight bytes, widen, subtract the kernel zero point,
      // madd pairs into 4 int32 lanes, rotate activations one 32-bit lane.
      const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
      const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 8));
      const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 16));
      const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 24));
      const __m128i vxb3 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb3), vb_zero_point);
      vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
      w = (const void*) ((const uint8_t*) w + 32);
      k -= 8 * sizeof(uint8_t);
    } while (k != 0);
    // fp32 requantization: scale in float, clamp to (output_max - zero_point),
    // round-convert back to int32.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    // Add output zero point with saturation, pack to u8, apply the lower clamp.
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
    __m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
    __m128i vout = _mm_packus_epi16(vacc00x0123, vacc00x0123);
    vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
    if (nc >= 4) {
      // Full 4-column store; rewind A for the next column group.
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
      a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
      nc -= 4;
    } else {
      // Tail: store the remaining 1-3 bytes.
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c0 = (uint8_t) _mm_extract_epi8(vout, 0);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 3,929
| 32.02521
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gemm/gen/qu8-gemm-1x4c2s4-minmax-fp32-wasmsimd-dot16x2-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2s4-wasmsimd-dot16x2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
// QU8 GEMM microkernel (min/max clamping, fp32 requantization), WAsm SIMD
// variant using wasm_i32x4_dot_i16x8 ("dot16x2") with 128-bit weight loads.
// Computes a 1x4 C tile per outer iteration; K reduced 8 bytes at a time with
// the "c2s4" scheme (activation vector rotated one 32-bit lane between steps).
// Requantization uses the magic-bias trick instead of a float->int conversion.
void xnn_qu8_gemm_minmax_fp32_ukernel_1x4c2s4__wasmsimd_dot16x2_ld128(
    size_t mr,                   // number of A/C rows; this kernel handles exactly 1
    size_t nc,                   // number of C columns remaining
    size_t kc,                   // reduction (K) length in bytes
    const uint8_t* restrict a,   // input activations
    size_t a_stride,             // row stride of A (unused here since mr == 1)
    const void* restrict w,      // packed weights: 4x int32 bias, then 32-byte blocks per 8 K steps
    uint8_t* restrict c,         // output
    size_t cm_stride,            // row stride of C (unused here since mr == 1)
    size_t cn_stride,            // byte stride between successive 4-column groups of C
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(uint8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  const uint8_t* a0 = a;
  uint8_t* c0 = c;
  // K is consumed 8 bytes at a time; round up so the inner loop has no remainder.
  kc = round_up_po2(kc, 8 * sizeof(uint8_t));
  do {
    // Initialize the 4 int32 accumulators from the packed per-column biases.
    v128_t vacc0x0123 = wasm_v128_load(w);
    w = (const void*) ((const int32_t*) w + 4);
    const v128_t vb_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.kernel_zero_point);
    size_t k = kc;
    do {
      // Load 8 activation bytes, zero-extended to 8x uint16.
      v128_t vxa0 = wasm_u16x8_load8x8((const v128_t*) a0);
      a0 += 8;
      // First 16-byte weight load covers dot steps 0 and 1 (low/high halves).
      const v128_t vb01 = wasm_v128_load(w);
      const v128_t vxb0 = wasm_i16x8_sub(wasm_u16x8_extend_low_u8x16(vb01), vb_zero_point);
      const v128_t vxb1 = wasm_i16x8_sub(wasm_u16x8_extend_high_u8x16(vb01), vb_zero_point);
      // dot_i16x8 multiplies 16-bit pairs and sums adjacent products into int32.
      vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb0));
      // Rotate activations one 32-bit lane (lane 4 pulls in lane 0 of the same vector).
      vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
      vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb1));
      vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
      // Second 16-byte weight load covers dot steps 2 and 3.
      const v128_t vb23 = wasm_v128_load((const uint8_t*) w + 16);
      const v128_t vxb2 = wasm_i16x8_sub(wasm_u16x8_extend_low_u8x16(vb23), vb_zero_point);
      const v128_t vxb3 = wasm_i16x8_sub(wasm_u16x8_extend_high_u8x16(vb23), vb_zero_point);
      vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb2));
      vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
      vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb3));
      w = (const uint8_t*) w + 32;
      k -= 8 * sizeof(uint8_t);
    } while (k != 0);
    // Magic-bias requantization: convert to float, scale, add magic bias so the
    // quantized value lands in the mantissa, clamp via integer max (lower bound),
    // then subtract (magic_bias - output_zero_point) in the integer domain.
    vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123);
    const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
    vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale);
    const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
    vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias);
    const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min)
;
    vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min);
    const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
    vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point);
    // Narrow to 16-bit then to unsigned 8-bit, and apply the upper clamp.
    v128_t vacc00x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc0x0123);
    v128_t vout = wasm_u8x16_narrow_i16x8(vacc00x0123, vacc00x0123);
    const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
    vout = wasm_u8x16_min(vout, voutput_max);
    if (nc >= 4) {
      // Full 4-column store; rewind A for the next column group.
      wasm_v128_store32_lane(c0, vout, 0);
      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
      a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
      nc -= 4;
    } else {
      // Tail: store the remaining 1-3 bytes, shifting consumed lanes out.
      if (nc & 2) {
        wasm_v128_store16_lane(c0, vout, 0);
        c0 += 2;
        vout = wasm_u32x4_shr(vout, 16);
      }
      if (nc & 1) {
        wasm_v128_store8_lane(c0, vout, 0);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 3,822
| 30.858333
| 134
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gemm/gen/qu8-gemm-1x4c2s4-minmax-fp32-wasmsimd-dot16x2-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2s4-wasmsimd-dot16x2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
// QU8 GEMM microkernel (min/max clamping, fp32 requantization), WAsm SIMD
// variant using wasm_i32x4_dot_i16x8 ("dot16x2") with 64-bit weight loads
// (one wasm_u16x8_load8x8 per dot step). Computes a 1x4 C tile per outer
// iteration; K reduced 8 bytes at a time with the "c2s4" rotate-and-dot scheme.
// Requantization uses the magic-bias trick instead of a float->int conversion.
void xnn_qu8_gemm_minmax_fp32_ukernel_1x4c2s4__wasmsimd_dot16x2_ld64(
    size_t mr,                   // number of A/C rows; this kernel handles exactly 1
    size_t nc,                   // number of C columns remaining
    size_t kc,                   // reduction (K) length in bytes
    const uint8_t* restrict a,   // input activations
    size_t a_stride,             // row stride of A (unused here since mr == 1)
    const void* restrict w,      // packed weights: 4x int32 bias, then 32-byte blocks per 8 K steps
    uint8_t* restrict c,         // output
    size_t cm_stride,            // row stride of C (unused here since mr == 1)
    size_t cn_stride,            // byte stride between successive 4-column groups of C
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(uint8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  const uint8_t* a0 = a;
  uint8_t* c0 = c;
  // K is consumed 8 bytes at a time; round up so the inner loop has no remainder.
  kc = round_up_po2(kc, 8 * sizeof(uint8_t));
  do {
    // Initialize the 4 int32 accumulators from the packed per-column biases.
    v128_t vacc0x0123 = wasm_v128_load(w);
    w = (const void*) ((const int32_t*) w + 4);
    const v128_t vb_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.kernel_zero_point);
    size_t k = kc;
    do {
      // Load 8 activation bytes, zero-extended to 8x uint16.
      v128_t vxa0 = wasm_u16x8_load8x8((const v128_t*) a0);
      a0 += 8;
      // 4 steps: load+widen 8 weight bytes, subtract the kernel zero point,
      // dot-accumulate adjacent 16-bit products into 4 int32 lanes, then
      // rotate the activations one 32-bit lane for the next step.
      const v128_t vxb0 = wasm_i16x8_sub(wasm_u16x8_load8x8(w), vb_zero_point);
      vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb0));
      vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
      const v128_t vxb1 = wasm_i16x8_sub(wasm_u16x8_load8x8((const uint8_t*) w + 8), vb_zero_point);
      vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb1));
      vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
      const v128_t vxb2 = wasm_i16x8_sub(wasm_u16x8_load8x8((const uint8_t*) w + 16), vb_zero_point);
      vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb2));
      vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
      const v128_t vxb3 = wasm_i16x8_sub(wasm_u16x8_load8x8((const uint8_t*) w + 24), vb_zero_point);
      vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb3));
      w = (const uint8_t*) w + 32;
      k -= 8 * sizeof(uint8_t);
    } while (k != 0);
    // Magic-bias requantization: convert to float, scale, add magic bias so the
    // quantized value lands in the mantissa, clamp via integer max (lower bound),
    // then subtract (magic_bias - output_zero_point) in the integer domain.
    vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123);
    const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
    vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale);
    const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
    vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias);
    const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
    vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min);
    const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
    vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point);
    // Narrow to 16-bit then to unsigned 8-bit, and apply the upper clamp.
    v128_t vacc00x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc0x0123);
    v128_t vout = wasm_u8x16_narrow_i16x8(vacc00x0123, vacc00x0123);
    const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
    vout = wasm_u8x16_min(vout, voutput_max);
    if (nc >= 4) {
      // Full 4-column store; rewind A for the next column group.
      wasm_v128_store32_lane(c0, vout, 0);
      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
      a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
      nc -= 4;
    } else {
      // Tail: store the remaining 1-3 bytes, shifting consumed lanes out.
      if (nc & 2) {
        wasm_v128_store16_lane(c0, vout, 0);
        c0 += 2;
        vout = wasm_u32x4_shr(vout, 16);
      }
      if (nc & 1) {
        wasm_v128_store8_lane(c0, vout, 0);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 3,724
| 30.567797
| 134
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gemm/gen/qu8-gemm-1x4c2s4-minmax-fp32-xop-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QU8 GEMM microkernel (min/max clamping, fp32 requantization), AMD XOP variant
// with 128-bit weight loads: uses the fused _mm_maddd_epi16 (multiply-add-add)
// in place of separate madd + add. Computes a 1x4 C tile per outer iteration;
// K reduced 8 bytes at a time with the "c2s4" rotate-and-madd scheme.
void xnn_qu8_gemm_minmax_fp32_ukernel_1x4c2s4__xop_ld128(
    size_t mr,                   // number of A/C rows; this kernel handles exactly 1
    size_t nc,                   // number of C columns remaining
    size_t kc,                   // reduction (K) length in bytes
    const uint8_t* restrict a,   // input activations
    size_t a_stride,             // row stride of A (unused here since mr == 1)
    const void* restrict w,      // packed weights: 4x int32 bias, then 32-byte blocks per 8 K steps
    uint8_t* restrict c,         // output
    size_t cm_stride,            // row stride of C (unused here since mr == 1)
    size_t cn_stride,            // byte stride between successive 4-column groups of C
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(uint8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // K is consumed 8 bytes at a time; round up so the inner loop has no remainder.
  kc = round_up_po2(kc, 8 * sizeof(uint8_t));
  const uint8_t* a0 = a;
  uint8_t* c0 = c;
  do {
    // Initialize the 4 int32 accumulators from the packed per-column biases.
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    w = (const void*) ((const int32_t*) w + 4);
    size_t k = kc;
    const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
    const __m128i vzero = _mm_setzero_si128();
    do {
      // Load 8 activation bytes and zero-extend to 8x uint16.
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      __m128i vxa0 = _mm_cvtepu8_epi16(va0);
      a0 += 8;
      // First 16-byte weight load covers steps 0 and 1 (lo/hi halves);
      // _mm_maddd_epi16 fuses the pairwise multiply-add with accumulation.
      const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
      const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb01, vzero), vb_zero_point);
      const __m128i vxb1 = _mm_sub_epi16(_mm_unpackhi_epi8(vb01, vzero), vb_zero_point);
      vacc0x0123 = _mm_maddd_epi16(vxa0, vxb0, vacc0x0123);
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc0x0123 = _mm_maddd_epi16(vxa0, vxb1, vacc0x0123);
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      // Second 16-byte weight load covers steps 2 and 3.
      const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const uint8_t*) w + 16));
      const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb23, vzero), vb_zero_point);
      const __m128i vxb3 = _mm_sub_epi16(_mm_unpackhi_epi8(vb23, vzero), vb_zero_point);
      vacc0x0123 = _mm_maddd_epi16(vxa0, vxb2, vacc0x0123);
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      vacc0x0123 = _mm_maddd_epi16(vxa0, vxb3, vacc0x0123);
      w = (const void*) ((const uint8_t*) w + 32);
      k -= 8 * sizeof(uint8_t);
    } while (k != 0);
    // fp32 requantization: scale in float, clamp to (output_max - zero_point),
    // round-convert back to int32.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    // Add output zero point with saturation, pack to u8, apply the lower clamp.
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
    __m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
    __m128i vout = _mm_packus_epi16(vacc00x0123, vacc00x0123);
    vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
    if (nc >= 4) {
      // Full 4-column store; rewind A for the next column group.
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
      a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
      nc -= 4;
    } else {
      // Tail: store the remaining 1-3 bytes.
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c0 = (uint8_t) _mm_extract_epi8(vout, 0);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 3,833
| 30.42623
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gemm/gen/qu8-gemm-1x4c2s4-minmax-fp32-xop-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QU8 GEMM microkernel (min/max clamping, fp32 requantization), AMD XOP variant
// with 64-bit weight loads: 8 weight bytes per load, zero-extended via
// _mm_cvtepu8_epi16, accumulated with the fused _mm_maddd_epi16. Computes a
// 1x4 C tile per outer iteration; K reduced 8 bytes at a time ("c2s4" scheme).
void xnn_qu8_gemm_minmax_fp32_ukernel_1x4c2s4__xop_ld64(
    size_t mr,                   // number of A/C rows; this kernel handles exactly 1
    size_t nc,                   // number of C columns remaining
    size_t kc,                   // reduction (K) length in bytes
    const uint8_t* restrict a,   // input activations
    size_t a_stride,             // row stride of A (unused here since mr == 1)
    const void* restrict w,      // packed weights: 4x int32 bias, then 32-byte blocks per 8 K steps
    uint8_t* restrict c,         // output
    size_t cm_stride,            // row stride of C (unused here since mr == 1)
    size_t cn_stride,            // byte stride between successive 4-column groups of C
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(uint8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // K is consumed 8 bytes at a time; round up so the inner loop has no remainder.
  kc = round_up_po2(kc, 8 * sizeof(uint8_t));
  const uint8_t* a0 = a;
  uint8_t* c0 = c;
  do {
    // Initialize the 4 int32 accumulators from the packed per-column biases.
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    w = (const void*) ((const int32_t*) w + 4);
    size_t k = kc;
    const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
    do {
      // Load 8 activation bytes and zero-extend to 8x uint16.
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      __m128i vxa0 = _mm_cvtepu8_epi16(va0);
      a0 += 8;
      // 4 steps: load+widen 8 weight bytes, subtract the kernel zero point,
      // fused multiply-add-accumulate, rotate activations one 32-bit lane.
      const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
      const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
      vacc0x0123 = _mm_maddd_epi16(vxa0, vxb0, vacc0x0123);
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 8));
      const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
      vacc0x0123 = _mm_maddd_epi16(vxa0, vxb1, vacc0x0123);
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 16));
      const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
      vacc0x0123 = _mm_maddd_epi16(vxa0, vxb2, vacc0x0123);
      vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
      const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 24));
      const __m128i vxb3 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb3), vb_zero_point);
      vacc0x0123 = _mm_maddd_epi16(vxa0, vxb3, vacc0x0123);
      w = (const void*) ((const uint8_t*) w + 32);
      k -= 8 * sizeof(uint8_t);
    } while (k != 0);
    // fp32 requantization: scale in float, clamp to (output_max - zero_point),
    // round-convert back to int32.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    // Add output zero point with saturation, pack to u8, apply the lower clamp.
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
    __m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
    __m128i vout = _mm_packus_epi16(vacc00x0123, vacc00x0123);
    vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
    if (nc >= 4) {
      // Full 4-column store; rewind A for the next column group.
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
      a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
      nc -= 4;
    } else {
      // Tail: store the remaining 1-3 bytes.
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c0 = (uint8_t) _mm_extract_epi8(vout, 0);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 3,924
| 30.910569
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gemm/gen/qu8-gemm-1x4c8-minmax-fp32-avx-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QU8 GEMM microkernel (min/max clamping, fp32 requantization), AVX variant of
// the "c8" layout with 128-bit weight loads: each output column keeps its OWN
// 4-lane accumulator (vacc0x0..vacc0x3) over 8 K elements per step; the four
// accumulators are reduced to one int32 per column with _mm_hadd_epi32 after
// the K loop. Computes a 1x4 C tile per outer iteration.
void xnn_qu8_gemm_minmax_fp32_ukernel_1x4c8__avx_ld128(
    size_t mr,                   // number of A/C rows; this kernel handles exactly 1
    size_t nc,                   // number of C columns remaining
    size_t kc,                   // reduction (K) length in bytes
    const uint8_t* restrict a,   // input activations
    size_t a_stride,             // row stride of A (unused here since mr == 1)
    const void* restrict w,      // packed weights: 4x int32 bias, then 32-byte blocks per 8 K steps
    uint8_t* restrict c,         // output
    size_t cm_stride,            // row stride of C (unused here since mr == 1)
    size_t cn_stride,            // byte stride between successive 4-column groups of C
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(uint8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // K is consumed 8 bytes at a time; round up so the inner loop has no remainder.
  kc = round_up_po2(kc, 8 * sizeof(uint8_t));
  const uint8_t* a0 = a;
  uint8_t* c0 = c;
  do {
    // Seed lane 0 of each per-column accumulator with that column's int32 bias.
    __m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
    __m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
    __m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
    __m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
    w = (const int32_t*) w + 4;
    size_t k = 0;
    const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
    const __m128i vzero = _mm_setzero_si128();
    while (k < kc) {
      // Load 8 activation bytes and zero-extend to 8x uint16.
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
      a0 += 8;
      // Each 16-byte weight load holds two columns' 8 K weights (lo/hi halves);
      // madd accumulates each column's products into its own vector.
      const __m128i vb01 = _mm_load_si128((const __m128i*) w);
      const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb01, vzero), vb_zero_point);
      const __m128i vxb1 = _mm_sub_epi16(_mm_unpackhi_epi8(vb01, vzero), vb_zero_point);
      vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
      vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
      const __m128i vb23 = _mm_load_si128((const __m128i*) ((const uint8_t*) w + 16));
      const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb23, vzero), vb_zero_point);
      const __m128i vxb3 = _mm_sub_epi16(_mm_unpackhi_epi8(vb23, vzero), vb_zero_point);
      vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
      vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
      w = (const void*) ((const uint8_t*) w + 32);
      k += 8 * sizeof(uint8_t);
    }
    // Horizontal reduction: two hadd levels collapse each column's 4 partial
    // sums into one lane, yielding [col0, col1, col2, col3].
    const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
    const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
    __m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
    // fp32 requantization: scale in float, clamp to (output_max - zero_point),
    // round-convert back to int32.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    // Add output zero point with saturation, pack to u8, apply the lower clamp.
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
    __m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
    __m128i vout = _mm_packus_epi16(vacc00x0123, vacc00x0123);
    vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
    if (nc >= 4) {
      // Full 4-column store; rewind A for the next column group.
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
      a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
      nc -= 4;
    } else {
      // Tail: store the remaining 1-3 bytes.
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c0 = (uint8_t) _mm_extract_epi8(vout, 0);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 4,027
| 31.483871
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gemm/gen/qu8-gemm-1x4c8-minmax-fp32-avx-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QU8 GEMM microkernel (min/max clamping, fp32 requantization), AVX variant of
// the "c8" layout with 64-bit weight loads: one 8-byte load per column per K
// step, zero-extended via _mm_cvtepu8_epi16. Each output column keeps its own
// 4-lane accumulator; the four are reduced with _mm_hadd_epi32 after the K
// loop. Computes a 1x4 C tile per outer iteration.
void xnn_qu8_gemm_minmax_fp32_ukernel_1x4c8__avx_ld64(
    size_t mr,                   // number of A/C rows; this kernel handles exactly 1
    size_t nc,                   // number of C columns remaining
    size_t kc,                   // reduction (K) length in bytes
    const uint8_t* restrict a,   // input activations
    size_t a_stride,             // row stride of A (unused here since mr == 1)
    const void* restrict w,      // packed weights: 4x int32 bias, then 32-byte blocks per 8 K steps
    uint8_t* restrict c,         // output
    size_t cm_stride,            // row stride of C (unused here since mr == 1)
    size_t cn_stride,            // byte stride between successive 4-column groups of C
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(uint8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // K is consumed 8 bytes at a time; round up so the inner loop has no remainder.
  kc = round_up_po2(kc, 8 * sizeof(uint8_t));
  const uint8_t* a0 = a;
  uint8_t* c0 = c;
  do {
    // Seed lane 0 of each per-column accumulator with that column's int32 bias.
    __m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
    __m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
    __m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
    __m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
    w = (const int32_t*) w + 4;
    size_t k = 0;
    const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
    while (k < kc) {
      // Load 8 activation bytes and zero-extend to 8x uint16.
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
      a0 += 8;
      // One 8-byte weight load per column: widen, subtract the kernel zero
      // point, madd into that column's accumulator.
      const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
      const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
      vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
      const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 8));
      const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
      vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
      const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 16));
      const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
      vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
      const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 24));
      const __m128i vxb3 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb3), vb_zero_point);
      vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
      w = (const void*) ((const uint8_t*) w + 32);
      k += 8 * sizeof(uint8_t);
    }
    // Horizontal reduction: two hadd levels collapse each column's 4 partial
    // sums into one lane, yielding [col0, col1, col2, col3].
    const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
    const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
    __m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
    // fp32 requantization: scale in float, clamp to (output_max - zero_point),
    // round-convert back to int32.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    // Add output zero point with saturation, pack to u8, apply the lower clamp.
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
    __m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
    __m128i vout = _mm_packus_epi16(vacc00x0123, vacc00x0123);
    vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
    if (nc >= 4) {
      // Full 4-column store; rewind A for the next column group.
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
      a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
      nc -= 4;
    } else {
      // Tail: store the remaining 1-3 bytes.
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c0 = (uint8_t) _mm_extract_epi8(vout, 0);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 4,122
| 31.464567
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gemm/gen/qu8-gemm-1x4c8-minmax-fp32-sse2-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// 1x4 QU8 GEMM micro-kernel: computes one row (mr <= 1) by up to four columns
// of C = A*B for unsigned 8-bit operands, accumulating in int32 and
// requantizing via fp32 arithmetic. SSE2 variant with 128-bit ("ld128")
// weight loads: each 16-byte load carries 8 K-elements for two columns.
//
// Arguments follow the XNNPACK GEMM micro-kernel contract:
//   mr, nc, kc - tile rows / remaining output columns / reduction size (bytes)
//   a          - input row; a_stride is unused here since mr == 1
//   w          - packed weights: 4 int32 biases, then 32 bytes of interleaved
//                uint8 weights per group of 8 K-elements (layout inferred
//                from the reads below; produced by the XNNPACK packing code)
//   c          - output row; cm_stride is unused here since mr == 1
//   cn_stride  - byte stride between consecutive groups of 4 output columns
//   params     - fp32_sse2 requantization constants (kernel zero point,
//                scale, output max/min clamps, output zero point)
void xnn_qu8_gemm_minmax_fp32_ukernel_1x4c8__sse2_ld128(
  size_t mr,
  size_t nc,
  size_t kc,
  const uint8_t* restrict a,
  size_t a_stride,
  const void* restrict w,
  uint8_t* restrict c,
  size_t cm_stride,
  size_t cn_stride,
  const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(uint8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // K is consumed in groups of 8 uint8 elements; round up so the loop below
  // always processes whole groups. Reading past the true end of A is
  // permitted per the XNN_OOB_READS attribute on this kernel.
  kc = round_up_po2(kc, 8 * sizeof(uint8_t));
  const uint8_t* a0 = a;
  uint8_t* c0 = c;
  do {
    // Seed the four per-column accumulators with the int32 biases stored at
    // the front of the packed weights.
    __m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
    __m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
    __m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
    __m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
    w = (const int32_t*) w + 4;
    size_t k = 0;
    const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
    const __m128i vzero = _mm_setzero_si128();
    while (k < kc) {
      // Load 8 activation bytes and zero-extend uint8 -> int16
      // (unpack with a zero vector; SSE2 has no cvtepu8 intrinsic).
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      const __m128i vxa0 = _mm_unpacklo_epi8(va0, vzero);
      a0 += 8;
      // Columns 0-1: one 128-bit load holds 8 weights for each; zero-extend
      // and subtract the kernel zero point, then 16x16->32 multiply-add
      // pairs of products into the int32 accumulators.
      const __m128i vb01 = _mm_load_si128((const __m128i*) w);
      const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb01, vzero), vb_zero_point);
      const __m128i vxb1 = _mm_sub_epi16(_mm_unpackhi_epi8(vb01, vzero), vb_zero_point);
      vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
      vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
      // Columns 2-3: same treatment for the next 16 weight bytes.
      const __m128i vb23 = _mm_load_si128((const __m128i*) ((const uint8_t*) w + 16));
      const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb23, vzero), vb_zero_point);
      const __m128i vxb3 = _mm_sub_epi16(_mm_unpackhi_epi8(vb23, vzero), vb_zero_point);
      vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
      vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
      w = (const void*) ((const uint8_t*) w + 32);
      k += 8 * sizeof(uint8_t);
    }
    // Horizontal reduction: SSE2 lacks hadd, so fold the four 4-lane
    // accumulators into one vector of per-column sums with unpack/add pairs.
    const __m128i vacc0x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x0, vacc0x2), _mm_unpackhi_epi32(vacc0x0, vacc0x2));
    const __m128i vacc0x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x1, vacc0x3), _mm_unpackhi_epi32(vacc0x1, vacc0x3));
    __m128i vacc0x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x02, vacc0x13), _mm_unpackhi_epi32(vacc0x02, vacc0x13));
    // fp32 requantization: convert to float, scale, and clamp to the upper
    // bound while still in float so the int32 conversion cannot overflow.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    // Add the output zero point with signed saturation, pack to uint8
    // (packus saturates below at 0), then apply the lower clamp.
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
    __m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
    __m128i vout = _mm_packus_epi16(vacc00x0123, vacc00x0123);
    vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
    if (nc >= 4) {
      // Full tile: store 4 output bytes, advance to the next column group,
      // and rewind the A pointer (it walked kc bytes inside the K loop).
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
      a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
      nc -= 4;
    } else {
      // Tail: emit the remaining 2 and/or 1 bytes, shifting consumed lanes out.
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        // SSE2 has no 8-bit extract; the low byte of the 32-bit lane is the value.
        *c0 = (uint8_t) _mm_cvtsi128_si32(vout);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 4,153
| 33.616667
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-gemm/gen/qu8-gemm-1x4c8-minmax-fp32-sse2-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// 1x4 QU8 GEMM micro-kernel: computes one row (mr <= 1) by up to four columns
// of C = A*B for unsigned 8-bit operands, accumulating in int32 and
// requantizing via fp32 arithmetic. SSE2 variant with 64-bit ("ld64")
// weight loads: each 8-byte load carries 8 K-elements for a single column.
//
// Arguments follow the XNNPACK GEMM micro-kernel contract:
//   mr, nc, kc - tile rows / remaining output columns / reduction size (bytes)
//   a          - input row; a_stride is unused here since mr == 1
//   w          - packed weights: 4 int32 biases, then 32 bytes of uint8
//                weights (8 per column) per group of 8 K-elements (layout
//                inferred from the reads below)
//   c          - output row; cm_stride is unused here since mr == 1
//   cn_stride  - byte stride between consecutive groups of 4 output columns
//   params     - fp32_sse2 requantization constants (kernel zero point,
//                scale, output max/min clamps, output zero point)
void xnn_qu8_gemm_minmax_fp32_ukernel_1x4c8__sse2_ld64(
  size_t mr,
  size_t nc,
  size_t kc,
  const uint8_t* restrict a,
  size_t a_stride,
  const void* restrict w,
  uint8_t* restrict c,
  size_t cm_stride,
  size_t cn_stride,
  const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(uint8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // K is consumed in groups of 8 uint8 elements; round up so the loop below
  // always processes whole groups. Reading past the true end of A is
  // permitted per the XNN_OOB_READS attribute on this kernel.
  kc = round_up_po2(kc, 8 * sizeof(uint8_t));
  const uint8_t* a0 = a;
  uint8_t* c0 = c;
  do {
    // Seed the four per-column accumulators with the int32 biases stored at
    // the front of the packed weights.
    __m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
    __m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
    __m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
    __m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
    w = (const int32_t*) w + 4;
    size_t k = 0;
    const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
    const __m128i vzero = _mm_setzero_si128();
    while (k < kc) {
      // Load 8 activation bytes and zero-extend uint8 -> int16
      // (unpack with a zero vector; SSE2 has no cvtepu8 intrinsic).
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      const __m128i vxa0 = _mm_unpacklo_epi8(va0, vzero);
      a0 += 8;
      // One 64-bit load per column: zero-extend the 8 weight bytes, subtract
      // the kernel zero point, then 16x16->32 multiply-add pairs of products
      // into that column's int32 accumulator.
      const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
      const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb0, vzero), vb_zero_point);
      vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
      const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 8));
      const __m128i vxb1 = _mm_sub_epi16(_mm_unpacklo_epi8(vb1, vzero), vb_zero_point);
      vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
      const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 16));
      const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb2, vzero), vb_zero_point);
      vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
      const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 24));
      const __m128i vxb3 = _mm_sub_epi16(_mm_unpacklo_epi8(vb3, vzero), vb_zero_point);
      vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
      w = (const void*) ((const uint8_t*) w + 32);
      k += 8 * sizeof(uint8_t);
    }
    // Horizontal reduction: SSE2 lacks hadd, so fold the four 4-lane
    // accumulators into one vector of per-column sums with unpack/add pairs.
    const __m128i vacc0x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x0, vacc0x2), _mm_unpackhi_epi32(vacc0x0, vacc0x2));
    const __m128i vacc0x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x1, vacc0x3), _mm_unpackhi_epi32(vacc0x1, vacc0x3));
    __m128i vacc0x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x02, vacc0x13), _mm_unpackhi_epi32(vacc0x02, vacc0x13));
    // fp32 requantization: convert to float, scale, and clamp to the upper
    // bound while still in float so the int32 conversion cannot overflow.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    // Add the output zero point with signed saturation, pack to uint8
    // (packus saturates below at 0), then apply the lower clamp.
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
    __m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
    __m128i vout = _mm_packus_epi16(vacc00x0123, vacc00x0123);
    vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
    if (nc >= 4) {
      // Full tile: store 4 output bytes, advance to the next column group,
      // and rewind the A pointer (it walked kc bytes inside the K loop).
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
      a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
      nc -= 4;
    } else {
      // Tail: emit the remaining 2 and/or 1 bytes, shifting consumed lanes out.
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        // SSE2 has no 8-bit extract; the low byte of the 32-bit lane is the value.
        *c0 = (uint8_t) _mm_cvtsi128_si32(vout);
      }
      nc = 0;
    }
  } while (nc != 0);
}
| 4,323
| 33.870968
| 119
|
c
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.