repo
stringlengths 1
152
⌀ | file
stringlengths 14
221
| code
stringlengths 501
25k
| file_length
int64 501
25k
| avg_line_length
float64 20
99.5
| max_line_length
int64 21
134
| extension_type
stringclasses 2
values |
|---|---|---|---|---|---|---|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-3p2c-minmax-fp32-wasm-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_3p2c__wasm_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
size_t c = channels;
const void* w = weights;
for (; c >= 2; c -= 2) {
int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 6 * sizeof(int8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
const float vscale0 = unaligned_indexed_load_f32(w, 0);
const float vscale1 = unaligned_indexed_load_f32(w, 1);
w = (const void*) ((const float*) w + 2);
vfpacc0 *= vscale0;
vfpacc1 *= vscale1;
vfpacc0 = __builtin_wasm_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = __builtin_wasm_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc0 = __builtin_wasm_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = __builtin_wasm_min_f32(vfpacc1, voutput_max_less_zero_point);
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
vacc += vi2 * vk2;
const float vscale = unaligned_load_f32((const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 6 * sizeof(int8_t)));
float vfpacc = (float) vacc * vscale;
vfpacc = __builtin_wasm_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = __builtin_wasm_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
}
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 5,235
| 34.863014
| 120
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-4p2c-minmax-fp32-scalar-imagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_4p2c__scalar_imagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
size_t c = channels;
const void* w = weights;
for (; c >= 2; c -= 2) {
int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 8 * sizeof(int8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
const float vscale0 = unaligned_indexed_load_f32(w, 0);
const float vscale1 = unaligned_indexed_load_f32(w, 1);
w = (const void*) ((const float*) w + 2);
vfpacc0 *= vscale0;
vfpacc1 *= vscale1;
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0);
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1);
vout0 = math_max_s32(vout0, vmagic_min);
vout1 = math_max_s32(vout1, vmagic_min);
vout0 = math_min_s32(vout0, vmagic_max);
vout1 = math_min_s32(vout1, vmagic_max);
vout0 -= vmagic_bias_less_zero_point;
vout1 -= vmagic_bias_less_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6];
vacc += vi3 * vk3;
const float vscale = unaligned_load_f32((const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 8 * sizeof(int8_t)));
float vfpacc = (float) vacc * vscale;
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc);
vout = math_max_s32(vout, vmagic_min);
vout = math_min_s32(vout, vmagic_max);
vout -= vmagic_bias_less_zero_point;
*output++ = (int8_t) vout;
}
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 5,682
| 32.827381
| 120
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-5f5m5l16c8s8r-minmax-fp32-neon-mla8-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-neon-mul8.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_5f5m5l16c8s8r__neon_mla8_ld128(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->fp32_neon.magic_bias);
const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(¶ms->fp32_neon.magic_bias_less_output_zero_point);
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->fp32_neon.output_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->fp32_neon.output_max);
do {
const void* w = weights;
// First pass to process 5 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 8);
for (; c >= 16; c -= 16) {
int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc89AB = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vaccCDEF = vld1q_s32(w); w = (const int32_t*) w + 4;
const int8x16_t vi0x0123456789ABCDEF = vld1q_s8(i0); i0 += 16;
const int8x16_t vk0x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
int16x8_t vprod01234567 = vmull_s8(vget_low_s8(vi0x0123456789ABCDEF), vget_low_s8(vk0x0123456789ABCDEF));
int16x8_t vprod89ABCDEF = vmull_s8(vget_high_s8(vi0x0123456789ABCDEF), vget_high_s8(vk0x0123456789ABCDEF));
const int8x16_t vi1x0123456789ABCDEF = vld1q_s8(i1); i1 += 16;
const int8x16_t vk1x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
vprod01234567 = vmlal_s8(vprod01234567, vget_low_s8(vi1x0123456789ABCDEF), vget_low_s8(vk1x0123456789ABCDEF));
vprod89ABCDEF = vmlal_s8(vprod89ABCDEF, vget_high_s8(vi1x0123456789ABCDEF), vget_high_s8(vk1x0123456789ABCDEF));
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
const int8x16_t vi2x0123456789ABCDEF = vld1q_s8(i2); i2 += 16;
const int8x16_t vk2x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
vprod01234567 = vmull_s8(vget_low_s8(vi2x0123456789ABCDEF), vget_low_s8(vk2x0123456789ABCDEF));
vprod89ABCDEF = vmull_s8(vget_high_s8(vi2x0123456789ABCDEF), vget_high_s8(vk2x0123456789ABCDEF));
const int8x16_t vi3x0123456789ABCDEF = vld1q_s8(i3); i3 += 16;
const int8x16_t vk3x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
vprod01234567 = vmlal_s8(vprod01234567, vget_low_s8(vi3x0123456789ABCDEF), vget_low_s8(vk3x0123456789ABCDEF));
vprod89ABCDEF = vmlal_s8(vprod89ABCDEF, vget_high_s8(vi3x0123456789ABCDEF), vget_high_s8(vk3x0123456789ABCDEF));
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
const int8x16_t vi4x0123456789ABCDEF = vld1q_s8(i4); i4 += 16;
const int8x16_t vk4x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
vprod01234567 = vmull_s8(vget_low_s8(vi4x0123456789ABCDEF), vget_low_s8(vk4x0123456789ABCDEF));
vprod89ABCDEF = vmull_s8(vget_high_s8(vi4x0123456789ABCDEF), vget_high_s8(vk4x0123456789ABCDEF));
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
vst1q_s32(b, vacc89AB); b += 4;
vst1q_s32(b, vaccCDEF); b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
c -= 8;
} while (c != 0);
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 8);
for (; c >= 16; c -= 16) {
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
int32x4_t vacc89AB = vld1q_s32(b + 8);
int32x4_t vaccCDEF = vld1q_s32(b + 12);
const int8x16_t vi0x0123456789ABCDEF = vld1q_s8(i0); i0 += 16;
const int8x16_t vk0x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
int16x8_t vprod01234567 = vmull_s8(vget_low_s8(vi0x0123456789ABCDEF), vget_low_s8(vk0x0123456789ABCDEF));
int16x8_t vprod89ABCDEF = vmull_s8(vget_high_s8(vi0x0123456789ABCDEF), vget_high_s8(vk0x0123456789ABCDEF));
const int8x16_t vi1x0123456789ABCDEF = vld1q_s8(i1); i1 += 16;
const int8x16_t vk1x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
vprod01234567 = vmlal_s8(vprod01234567, vget_low_s8(vi1x0123456789ABCDEF), vget_low_s8(vk1x0123456789ABCDEF));
vprod89ABCDEF = vmlal_s8(vprod89ABCDEF, vget_high_s8(vi1x0123456789ABCDEF), vget_high_s8(vk1x0123456789ABCDEF));
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
const int8x16_t vi2x0123456789ABCDEF = vld1q_s8(i2); i2 += 16;
const int8x16_t vk2x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
vprod01234567 = vmull_s8(vget_low_s8(vi2x0123456789ABCDEF), vget_low_s8(vk2x0123456789ABCDEF));
vprod89ABCDEF = vmull_s8(vget_high_s8(vi2x0123456789ABCDEF), vget_high_s8(vk2x0123456789ABCDEF));
const int8x16_t vi3x0123456789ABCDEF = vld1q_s8(i3); i3 += 16;
const int8x16_t vk3x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
vprod01234567 = vmlal_s8(vprod01234567, vget_low_s8(vi3x0123456789ABCDEF), vget_low_s8(vk3x0123456789ABCDEF));
vprod89ABCDEF = vmlal_s8(vprod89ABCDEF, vget_high_s8(vi3x0123456789ABCDEF), vget_high_s8(vk3x0123456789ABCDEF));
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
const int8x16_t vi4x0123456789ABCDEF = vld1q_s8(i4); i4 += 16;
const int8x16_t vk4x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
vprod01234567 = vmull_s8(vget_low_s8(vi4x0123456789ABCDEF), vget_low_s8(vk4x0123456789ABCDEF));
vprod89ABCDEF = vmull_s8(vget_high_s8(vi4x0123456789ABCDEF), vget_high_s8(vk4x0123456789ABCDEF));
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
vst1q_s32(b, vacc89AB); b += 4;
vst1q_s32(b, vaccCDEF); b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
c -= 8;
} while (c != 0);
}
}
// Last pass to process up to 5 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 16; c -= 16) {
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
int32x4_t vacc89AB = vld1q_s32(b); b += 4;
int32x4_t vaccCDEF = vld1q_s32(b); b += 4;
const int8x16_t vi0x0123456789ABCDEF = vld1q_s8(i0); i0 += 16;
const int8x16_t vk0x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
int16x8_t vprod01234567 = vmull_s8(vget_low_s8(vi0x0123456789ABCDEF), vget_low_s8(vk0x0123456789ABCDEF));
int16x8_t vprod89ABCDEF = vmull_s8(vget_high_s8(vi0x0123456789ABCDEF), vget_high_s8(vk0x0123456789ABCDEF));
const int8x16_t vi1x0123456789ABCDEF = vld1q_s8(i1); i1 += 16;
const int8x16_t vk1x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
vprod01234567 = vmlal_s8(vprod01234567, vget_low_s8(vi1x0123456789ABCDEF), vget_low_s8(vk1x0123456789ABCDEF));
vprod89ABCDEF = vmlal_s8(vprod89ABCDEF, vget_high_s8(vi1x0123456789ABCDEF), vget_high_s8(vk1x0123456789ABCDEF));
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
const int8x16_t vi2x0123456789ABCDEF = vld1q_s8(i2); i2 += 16;
const int8x16_t vk2x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
vprod01234567 = vmull_s8(vget_low_s8(vi2x0123456789ABCDEF), vget_low_s8(vk2x0123456789ABCDEF));
vprod89ABCDEF = vmull_s8(vget_high_s8(vi2x0123456789ABCDEF), vget_high_s8(vk2x0123456789ABCDEF));
const int8x16_t vi3x0123456789ABCDEF = vld1q_s8(i3); i3 += 16;
const int8x16_t vk3x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
vprod01234567 = vmlal_s8(vprod01234567, vget_low_s8(vi3x0123456789ABCDEF), vget_low_s8(vk3x0123456789ABCDEF));
vprod89ABCDEF = vmlal_s8(vprod89ABCDEF, vget_high_s8(vi3x0123456789ABCDEF), vget_high_s8(vk3x0123456789ABCDEF));
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
const int8x16_t vi4x0123456789ABCDEF = vld1q_s8(i4); i4 += 16;
const int8x16_t vk4x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
vprod01234567 = vmull_s8(vget_low_s8(vi4x0123456789ABCDEF), vget_low_s8(vk4x0123456789ABCDEF));
vprod89ABCDEF = vmull_s8(vget_high_s8(vi4x0123456789ABCDEF), vget_high_s8(vk4x0123456789ABCDEF));
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
float32x4_t vfpacc89AB = vcvtq_f32_s32(vacc89AB);
float32x4_t vfpaccCDEF = vcvtq_f32_s32(vaccCDEF);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const float*) w + 4;
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const float*) w + 4;
const float32x4_t vscale89AB = vld1q_f32((const float*) w); w = (const float*) w + 4;
const float32x4_t vscaleCDEF = vld1q_f32((const float*) w); w = (const float*) w + 4;
vfpacc0123 = vmulq_f32(vfpacc0123, vscale0123);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale4567);
vfpacc89AB = vmulq_f32(vfpacc89AB, vscale89AB);
vfpaccCDEF = vmulq_f32(vfpaccCDEF, vscaleCDEF);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc89AB = vreinterpretq_s32_f32(vaddq_f32(vfpacc89AB, vmagic_bias));
vaccCDEF = vreinterpretq_s32_f32(vaddq_f32(vfpaccCDEF, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
vacc89AB = vqsubq_s32(vacc89AB, vmagic_bias_less_output_zero_point);
vaccCDEF = vqsubq_s32(vaccCDEF, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
int8x16_t vout0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc01234567), vacc89ABCDEF);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
#endif // !XNN_ARCH_ARM64
vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
vst1q_s8(output, vout0123456789ABCDEF); output += 16;
}
if XNN_UNLIKELY(c != 0) {
do {
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const float*) w + 4;
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const float*) w + 4;
vfpacc0123 = vmulq_f32(vfpacc0123, vscale0123);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale4567);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));
if XNN_LIKELY(c >= 8) {
vst1_s8(output, vout01234567); output += 8;
c -= 8;
} else {
if (c & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (c & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (c & 1) {
vst1_lane_s8(output, vout01234567, 0); output += 1;
}
c = 0;
}
} while (c != 0);
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 23,432
| 42.394444
| 123
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-5f5m5l16c8s8r-minmax-fp32-neon-mla8-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-neon-mul8.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_5f5m5l16c8s8r__neon_mla8_ld64(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->fp32_neon.magic_bias);
const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(¶ms->fp32_neon.magic_bias_less_output_zero_point);
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->fp32_neon.output_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->fp32_neon.output_max);
do {
const void* w = weights;
// First pass to process 5 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 8);
for (; c >= 16; c -= 16) {
int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc89AB = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vaccCDEF = vld1q_s32(w); w = (const int32_t*) w + 4;
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi0x89ABCDEF = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
int16x8_t vprod89ABCDEF = vmull_s8(vi0x89ABCDEF, vk0x89ABCDEF);
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi1x89ABCDEF = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vprod89ABCDEF = vmlal_s8(vprod89ABCDEF, vi1x89ABCDEF, vk1x89ABCDEF);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi2x89ABCDEF = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
vprod89ABCDEF = vmull_s8(vi2x89ABCDEF, vk2x89ABCDEF);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi3x89ABCDEF = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vprod89ABCDEF = vmlal_s8(vprod89ABCDEF, vi3x89ABCDEF, vk3x89ABCDEF);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vprod89ABCDEF = vmull_s8(vi4x89ABCDEF, vk4x89ABCDEF);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
vst1q_s32(b, vacc89AB); b += 4;
vst1q_s32(b, vaccCDEF); b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
c -= 8;
} while (c != 0);
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 8);
for (; c >= 16; c -= 16) {
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
int32x4_t vacc89AB = vld1q_s32(b + 8);
int32x4_t vaccCDEF = vld1q_s32(b + 12);
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi0x89ABCDEF = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
int16x8_t vprod89ABCDEF = vmull_s8(vi0x89ABCDEF, vk0x89ABCDEF);
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi1x89ABCDEF = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vprod89ABCDEF = vmlal_s8(vprod89ABCDEF, vi1x89ABCDEF, vk1x89ABCDEF);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi2x89ABCDEF = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
vprod89ABCDEF = vmull_s8(vi2x89ABCDEF, vk2x89ABCDEF);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi3x89ABCDEF = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vprod89ABCDEF = vmlal_s8(vprod89ABCDEF, vi3x89ABCDEF, vk3x89ABCDEF);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vprod89ABCDEF = vmull_s8(vi4x89ABCDEF, vk4x89ABCDEF);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
vst1q_s32(b, vacc89AB); b += 4;
vst1q_s32(b, vaccCDEF); b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
c -= 8;
} while (c != 0);
}
}
// Last pass to process up to 5 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 16; c -= 16) {
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
int32x4_t vacc89AB = vld1q_s32(b); b += 4;
int32x4_t vaccCDEF = vld1q_s32(b); b += 4;
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi0x89ABCDEF = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
int16x8_t vprod89ABCDEF = vmull_s8(vi0x89ABCDEF, vk0x89ABCDEF);
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi1x89ABCDEF = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vprod89ABCDEF = vmlal_s8(vprod89ABCDEF, vi1x89ABCDEF, vk1x89ABCDEF);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi2x89ABCDEF = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
vprod89ABCDEF = vmull_s8(vi2x89ABCDEF, vk2x89ABCDEF);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi3x89ABCDEF = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vprod89ABCDEF = vmlal_s8(vprod89ABCDEF, vi3x89ABCDEF, vk3x89ABCDEF);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vprod89ABCDEF = vmull_s8(vi4x89ABCDEF, vk4x89ABCDEF);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
float32x4_t vfpacc89AB = vcvtq_f32_s32(vacc89AB);
float32x4_t vfpaccCDEF = vcvtq_f32_s32(vaccCDEF);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const float*) w + 4;
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const float*) w + 4;
const float32x4_t vscale89AB = vld1q_f32((const float*) w); w = (const float*) w + 4;
const float32x4_t vscaleCDEF = vld1q_f32((const float*) w); w = (const float*) w + 4;
vfpacc0123 = vmulq_f32(vfpacc0123, vscale0123);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale4567);
vfpacc89AB = vmulq_f32(vfpacc89AB, vscale89AB);
vfpaccCDEF = vmulq_f32(vfpaccCDEF, vscaleCDEF);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc89AB = vreinterpretq_s32_f32(vaddq_f32(vfpacc89AB, vmagic_bias));
vaccCDEF = vreinterpretq_s32_f32(vaddq_f32(vfpaccCDEF, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
vacc89AB = vqsubq_s32(vacc89AB, vmagic_bias_less_output_zero_point);
vaccCDEF = vqsubq_s32(vaccCDEF, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
int8x16_t vout0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc01234567), vacc89ABCDEF);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
#endif // !XNN_ARCH_ARM64
vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
vst1q_s8(output, vout0123456789ABCDEF); output += 16;
}
if XNN_UNLIKELY(c != 0) {
do {
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const float*) w + 4;
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const float*) w + 4;
vfpacc0123 = vmulq_f32(vfpacc0123, vscale0123);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale4567);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));
if XNN_LIKELY(c >= 8) {
vst1_s8(output, vout01234567); output += 8;
c -= 8;
} else {
if (c & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (c & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (c & 1) {
vst1_lane_s8(output, vout01234567, 0); output += 1;
}
c = 0;
}
} while (c != 0);
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 23,866
| 40.87193
| 123
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-5f5m5l16c8s8r-minmax-fp32-neonv8-mla8-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-neon-mul8.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_5f5m5l16c8s8r__neonv8_mla8_ld128(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->fp32_neonv8.output_zero_point);
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->fp32_neonv8.output_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->fp32_neonv8.output_max);
do {
const void* w = weights;
// First pass to process 5 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 8);
for (; c >= 16; c -= 16) {
int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc89AB = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vaccCDEF = vld1q_s32(w); w = (const int32_t*) w + 4;
const int8x16_t vi0x0123456789ABCDEF = vld1q_s8(i0); i0 += 16;
const int8x16_t vk0x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
int16x8_t vprod01234567 = vmull_s8(vget_low_s8(vi0x0123456789ABCDEF), vget_low_s8(vk0x0123456789ABCDEF));
int16x8_t vprod89ABCDEF = vmull_s8(vget_high_s8(vi0x0123456789ABCDEF), vget_high_s8(vk0x0123456789ABCDEF));
const int8x16_t vi1x0123456789ABCDEF = vld1q_s8(i1); i1 += 16;
const int8x16_t vk1x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
vprod01234567 = vmlal_s8(vprod01234567, vget_low_s8(vi1x0123456789ABCDEF), vget_low_s8(vk1x0123456789ABCDEF));
vprod89ABCDEF = vmlal_s8(vprod89ABCDEF, vget_high_s8(vi1x0123456789ABCDEF), vget_high_s8(vk1x0123456789ABCDEF));
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
const int8x16_t vi2x0123456789ABCDEF = vld1q_s8(i2); i2 += 16;
const int8x16_t vk2x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
vprod01234567 = vmull_s8(vget_low_s8(vi2x0123456789ABCDEF), vget_low_s8(vk2x0123456789ABCDEF));
vprod89ABCDEF = vmull_s8(vget_high_s8(vi2x0123456789ABCDEF), vget_high_s8(vk2x0123456789ABCDEF));
const int8x16_t vi3x0123456789ABCDEF = vld1q_s8(i3); i3 += 16;
const int8x16_t vk3x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
vprod01234567 = vmlal_s8(vprod01234567, vget_low_s8(vi3x0123456789ABCDEF), vget_low_s8(vk3x0123456789ABCDEF));
vprod89ABCDEF = vmlal_s8(vprod89ABCDEF, vget_high_s8(vi3x0123456789ABCDEF), vget_high_s8(vk3x0123456789ABCDEF));
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
const int8x16_t vi4x0123456789ABCDEF = vld1q_s8(i4); i4 += 16;
const int8x16_t vk4x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
vprod01234567 = vmull_s8(vget_low_s8(vi4x0123456789ABCDEF), vget_low_s8(vk4x0123456789ABCDEF));
vprod89ABCDEF = vmull_s8(vget_high_s8(vi4x0123456789ABCDEF), vget_high_s8(vk4x0123456789ABCDEF));
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
vst1q_s32(b, vacc89AB); b += 4;
vst1q_s32(b, vaccCDEF); b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
c -= 8;
} while (c != 0);
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 8);
for (; c >= 16; c -= 16) {
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
int32x4_t vacc89AB = vld1q_s32(b + 8);
int32x4_t vaccCDEF = vld1q_s32(b + 12);
const int8x16_t vi0x0123456789ABCDEF = vld1q_s8(i0); i0 += 16;
const int8x16_t vk0x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
int16x8_t vprod01234567 = vmull_s8(vget_low_s8(vi0x0123456789ABCDEF), vget_low_s8(vk0x0123456789ABCDEF));
int16x8_t vprod89ABCDEF = vmull_s8(vget_high_s8(vi0x0123456789ABCDEF), vget_high_s8(vk0x0123456789ABCDEF));
const int8x16_t vi1x0123456789ABCDEF = vld1q_s8(i1); i1 += 16;
const int8x16_t vk1x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
vprod01234567 = vmlal_s8(vprod01234567, vget_low_s8(vi1x0123456789ABCDEF), vget_low_s8(vk1x0123456789ABCDEF));
vprod89ABCDEF = vmlal_s8(vprod89ABCDEF, vget_high_s8(vi1x0123456789ABCDEF), vget_high_s8(vk1x0123456789ABCDEF));
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
const int8x16_t vi2x0123456789ABCDEF = vld1q_s8(i2); i2 += 16;
const int8x16_t vk2x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
vprod01234567 = vmull_s8(vget_low_s8(vi2x0123456789ABCDEF), vget_low_s8(vk2x0123456789ABCDEF));
vprod89ABCDEF = vmull_s8(vget_high_s8(vi2x0123456789ABCDEF), vget_high_s8(vk2x0123456789ABCDEF));
const int8x16_t vi3x0123456789ABCDEF = vld1q_s8(i3); i3 += 16;
const int8x16_t vk3x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
vprod01234567 = vmlal_s8(vprod01234567, vget_low_s8(vi3x0123456789ABCDEF), vget_low_s8(vk3x0123456789ABCDEF));
vprod89ABCDEF = vmlal_s8(vprod89ABCDEF, vget_high_s8(vi3x0123456789ABCDEF), vget_high_s8(vk3x0123456789ABCDEF));
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
const int8x16_t vi4x0123456789ABCDEF = vld1q_s8(i4); i4 += 16;
const int8x16_t vk4x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
vprod01234567 = vmull_s8(vget_low_s8(vi4x0123456789ABCDEF), vget_low_s8(vk4x0123456789ABCDEF));
vprod89ABCDEF = vmull_s8(vget_high_s8(vi4x0123456789ABCDEF), vget_high_s8(vk4x0123456789ABCDEF));
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
vst1q_s32(b, vacc89AB); b += 4;
vst1q_s32(b, vaccCDEF); b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
c -= 8;
} while (c != 0);
}
}
// Last pass to process up to 5 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 16; c -= 16) {
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
int32x4_t vacc89AB = vld1q_s32(b); b += 4;
int32x4_t vaccCDEF = vld1q_s32(b); b += 4;
const int8x16_t vi0x0123456789ABCDEF = vld1q_s8(i0); i0 += 16;
const int8x16_t vk0x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
int16x8_t vprod01234567 = vmull_s8(vget_low_s8(vi0x0123456789ABCDEF), vget_low_s8(vk0x0123456789ABCDEF));
int16x8_t vprod89ABCDEF = vmull_s8(vget_high_s8(vi0x0123456789ABCDEF), vget_high_s8(vk0x0123456789ABCDEF));
const int8x16_t vi1x0123456789ABCDEF = vld1q_s8(i1); i1 += 16;
const int8x16_t vk1x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
vprod01234567 = vmlal_s8(vprod01234567, vget_low_s8(vi1x0123456789ABCDEF), vget_low_s8(vk1x0123456789ABCDEF));
vprod89ABCDEF = vmlal_s8(vprod89ABCDEF, vget_high_s8(vi1x0123456789ABCDEF), vget_high_s8(vk1x0123456789ABCDEF));
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
const int8x16_t vi2x0123456789ABCDEF = vld1q_s8(i2); i2 += 16;
const int8x16_t vk2x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
vprod01234567 = vmull_s8(vget_low_s8(vi2x0123456789ABCDEF), vget_low_s8(vk2x0123456789ABCDEF));
vprod89ABCDEF = vmull_s8(vget_high_s8(vi2x0123456789ABCDEF), vget_high_s8(vk2x0123456789ABCDEF));
const int8x16_t vi3x0123456789ABCDEF = vld1q_s8(i3); i3 += 16;
const int8x16_t vk3x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
vprod01234567 = vmlal_s8(vprod01234567, vget_low_s8(vi3x0123456789ABCDEF), vget_low_s8(vk3x0123456789ABCDEF));
vprod89ABCDEF = vmlal_s8(vprod89ABCDEF, vget_high_s8(vi3x0123456789ABCDEF), vget_high_s8(vk3x0123456789ABCDEF));
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
const int8x16_t vi4x0123456789ABCDEF = vld1q_s8(i4); i4 += 16;
const int8x16_t vk4x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
vprod01234567 = vmull_s8(vget_low_s8(vi4x0123456789ABCDEF), vget_low_s8(vk4x0123456789ABCDEF));
vprod89ABCDEF = vmull_s8(vget_high_s8(vi4x0123456789ABCDEF), vget_high_s8(vk4x0123456789ABCDEF));
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
float32x4_t vfpacc89AB = vcvtq_f32_s32(vacc89AB);
float32x4_t vfpaccCDEF = vcvtq_f32_s32(vaccCDEF);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const float*) w + 4;
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const float*) w + 4;
const float32x4_t vscale89AB = vld1q_f32((const float*) w); w = (const float*) w + 4;
const float32x4_t vscaleCDEF = vld1q_f32((const float*) w); w = (const float*) w + 4;
vfpacc0123 = vmulq_f32(vfpacc0123, vscale0123);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale4567);
vfpacc89AB = vmulq_f32(vfpacc89AB, vscale89AB);
vfpaccCDEF = vmulq_f32(vfpaccCDEF, vscaleCDEF);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
vacc89AB = vcvtnq_s32_f32(vfpacc89AB);
vaccCDEF = vcvtnq_s32_f32(vfpaccCDEF);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
int8x16_t vout0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc01234567), vacc89ABCDEF);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
#endif // !XNN_ARCH_ARM64
vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
vst1q_s8(output, vout0123456789ABCDEF); output += 16;
}
if XNN_UNLIKELY(c != 0) {
do {
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const float*) w + 4;
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const float*) w + 4;
vfpacc0123 = vmulq_f32(vfpacc0123, vscale0123);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale4567);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));
if XNN_LIKELY(c >= 8) {
vst1_s8(output, vout01234567); output += 8;
c -= 8;
} else {
if (c & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (c & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (c & 1) {
vst1_lane_s8(output, vout01234567, 0); output += 1;
}
c = 0;
}
} while (c != 0);
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 23,062
| 41.947858
| 120
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-5f5m5l16c8s8r-minmax-fp32-neonv8-mla8-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-neon-mul8.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_5f5m5l16c8s8r__neonv8_mla8_ld64(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->fp32_neonv8.output_zero_point);
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->fp32_neonv8.output_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->fp32_neonv8.output_max);
do {
const void* w = weights;
// First pass to process 5 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 8);
for (; c >= 16; c -= 16) {
int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc89AB = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vaccCDEF = vld1q_s32(w); w = (const int32_t*) w + 4;
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi0x89ABCDEF = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
int16x8_t vprod89ABCDEF = vmull_s8(vi0x89ABCDEF, vk0x89ABCDEF);
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi1x89ABCDEF = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vprod89ABCDEF = vmlal_s8(vprod89ABCDEF, vi1x89ABCDEF, vk1x89ABCDEF);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi2x89ABCDEF = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
vprod89ABCDEF = vmull_s8(vi2x89ABCDEF, vk2x89ABCDEF);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi3x89ABCDEF = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vprod89ABCDEF = vmlal_s8(vprod89ABCDEF, vi3x89ABCDEF, vk3x89ABCDEF);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vprod89ABCDEF = vmull_s8(vi4x89ABCDEF, vk4x89ABCDEF);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
vst1q_s32(b, vacc89AB); b += 4;
vst1q_s32(b, vaccCDEF); b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
c -= 8;
} while (c != 0);
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 8);
for (; c >= 16; c -= 16) {
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
int32x4_t vacc89AB = vld1q_s32(b + 8);
int32x4_t vaccCDEF = vld1q_s32(b + 12);
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi0x89ABCDEF = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
int16x8_t vprod89ABCDEF = vmull_s8(vi0x89ABCDEF, vk0x89ABCDEF);
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi1x89ABCDEF = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vprod89ABCDEF = vmlal_s8(vprod89ABCDEF, vi1x89ABCDEF, vk1x89ABCDEF);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi2x89ABCDEF = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
vprod89ABCDEF = vmull_s8(vi2x89ABCDEF, vk2x89ABCDEF);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi3x89ABCDEF = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vprod89ABCDEF = vmlal_s8(vprod89ABCDEF, vi3x89ABCDEF, vk3x89ABCDEF);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vprod89ABCDEF = vmull_s8(vi4x89ABCDEF, vk4x89ABCDEF);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
vst1q_s32(b, vacc89AB); b += 4;
vst1q_s32(b, vaccCDEF); b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
c -= 8;
} while (c != 0);
}
}
// Last pass to process up to 5 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 16; c -= 16) {
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
int32x4_t vacc89AB = vld1q_s32(b); b += 4;
int32x4_t vaccCDEF = vld1q_s32(b); b += 4;
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi0x89ABCDEF = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
int16x8_t vprod89ABCDEF = vmull_s8(vi0x89ABCDEF, vk0x89ABCDEF);
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi1x89ABCDEF = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vprod89ABCDEF = vmlal_s8(vprod89ABCDEF, vi1x89ABCDEF, vk1x89ABCDEF);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi2x89ABCDEF = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
vprod89ABCDEF = vmull_s8(vi2x89ABCDEF, vk2x89ABCDEF);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi3x89ABCDEF = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vprod89ABCDEF = vmlal_s8(vprod89ABCDEF, vi3x89ABCDEF, vk3x89ABCDEF);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vprod89ABCDEF = vmull_s8(vi4x89ABCDEF, vk4x89ABCDEF);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
float32x4_t vfpacc89AB = vcvtq_f32_s32(vacc89AB);
float32x4_t vfpaccCDEF = vcvtq_f32_s32(vaccCDEF);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const float*) w + 4;
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const float*) w + 4;
const float32x4_t vscale89AB = vld1q_f32((const float*) w); w = (const float*) w + 4;
const float32x4_t vscaleCDEF = vld1q_f32((const float*) w); w = (const float*) w + 4;
vfpacc0123 = vmulq_f32(vfpacc0123, vscale0123);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale4567);
vfpacc89AB = vmulq_f32(vfpacc89AB, vscale89AB);
vfpaccCDEF = vmulq_f32(vfpaccCDEF, vscaleCDEF);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
vacc89AB = vcvtnq_s32_f32(vfpacc89AB);
vaccCDEF = vcvtnq_s32_f32(vfpaccCDEF);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
int8x16_t vout0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc01234567), vacc89ABCDEF);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
#endif // !XNN_ARCH_ARM64
vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
vst1q_s8(output, vout0123456789ABCDEF); output += 16;
}
if XNN_UNLIKELY(c != 0) {
do {
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const float*) w + 4;
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const float*) w + 4;
vfpacc0123 = vmulq_f32(vfpacc0123, vscale0123);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale4567);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));
if XNN_LIKELY(c >= 8) {
vst1_s8(output, vout01234567); output += 8;
c -= 8;
} else {
if (c & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (c & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (c & 1) {
vst1_lane_s8(output, vout01234567, 0); output += 1;
}
c = 0;
}
} while (c != 0);
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 23,496
| 40.440917
| 105
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-5f5m5l1c1s1r-minmax-fp32-scalar-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_5f5m5l1c1s1r__scalar_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
do {
const void* w = weights;
// First pass to process 5 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
do {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 5 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
do {
int32_t vacc = *b;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) w)[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 5 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
// Last pass to process up to 5 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
do {
int32_t vacc = unaligned_load_s32(b++);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) w)[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 5 * sizeof(int8_t));
const float vscale = unaligned_load_f32(w);
w = (const void*) ((const float*) w + 1);
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 7,458
| 33.373272
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-5f5m5l1c1s1r-minmax-fp32-scalar-imagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_5f5m5l1c1s1r__scalar_imagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
do {
const void* w = weights;
// First pass to process 5 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
do {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 5 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
do {
int32_t vacc = *b;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) w)[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 5 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
// Last pass to process up to 5 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
do {
int32_t vacc = unaligned_load_s32(b++);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) w)[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 5 * sizeof(int8_t));
const float vscale = unaligned_load_f32(w);
w = (const void*) ((const float*) w + 1);
float vfpacc = (float) vacc * vscale;
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc);
vout = math_max_s32(vout, vmagic_min);
vout = math_min_s32(vout, vmagic_max);
vout -= vmagic_bias_less_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 7,346
| 32.701835
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-5f5m5l1c1s1r-minmax-fp32-scalar-lrintf.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_5f5m5l1c1s1r__scalar_lrintf(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
do {
const void* w = weights;
// First pass to process 5 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
do {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 5 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
do {
int32_t vacc = *b;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) w)[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 5 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
// Last pass to process up to 5 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
do {
int32_t vacc = unaligned_load_s32(b++);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) w)[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 5 * sizeof(int8_t));
const float vscale = unaligned_load_f32(w);
w = (const void*) ((const float*) w + 1);
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
const int32_t vrndacc = (int32_t) lrintf(vfpacc);
int32_t vout = vrndacc + voutput_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 7,362
| 32.930876
| 98
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-5f5m5l1c1s1r-minmax-fp32-wasm-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_5f5m5l1c1s1r__wasm_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
do {
const void* w = weights;
// First pass to process 5 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
do {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 5 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
do {
int32_t vacc = *b;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) w)[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 5 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
// Last pass to process up to 5 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
do {
int32_t vacc = unaligned_load_s32(b++);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) w)[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 5 * sizeof(int8_t));
const float vscale = unaligned_load_f32(w);
w = (const void*) ((const float*) w + 1);
float vfpacc = (float) vacc * vscale;
vfpacc = __builtin_wasm_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = __builtin_wasm_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 7,476
| 33.456221
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-5f5m5l2c1s1r-minmax-fp32-scalar-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_5f5m5l2c1s1r__scalar_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
do {
const void* w = weights;
// First pass to process 5 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 10 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 5 * sizeof(int8_t));
*b++ = vacc;
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
w = (const void*) ((uintptr_t) w + 10 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = b[0];
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 5 * sizeof(int8_t));
*b++ = vacc;
}
}
// Last pass to process up to 5 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
b += 2;
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
w = (const void*) ((uintptr_t) w + 10 * sizeof(int8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
const float vscale0 = unaligned_indexed_load_f32(w, 0);
const float vscale1 = unaligned_indexed_load_f32(w, 1);
w = (const void*) ((const float*) w + 2);
vfpacc0 *= vscale0;
vfpacc1 *= vscale1;
vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = b[0];
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const float vscale = unaligned_load_f32((const void*) ((uintptr_t) w + 5 * sizeof(int8_t)));
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 14,140
| 32.911271
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-5f5m5l2c1s1r-minmax-fp32-scalar-imagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_5f5m5l2c1s1r__scalar_imagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
do {
const void* w = weights;
// First pass to process 5 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 10 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 5 * sizeof(int8_t));
*b++ = vacc;
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
w = (const void*) ((uintptr_t) w + 10 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = b[0];
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 5 * sizeof(int8_t));
*b++ = vacc;
}
}
// Last pass to process up to 5 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
b += 2;
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
w = (const void*) ((uintptr_t) w + 10 * sizeof(int8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
const float vscale0 = unaligned_indexed_load_f32(w, 0);
const float vscale1 = unaligned_indexed_load_f32(w, 1);
w = (const void*) ((const float*) w + 2);
vfpacc0 *= vscale0;
vfpacc1 *= vscale1;
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0);
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1);
vout0 = math_max_s32(vout0, vmagic_min);
vout1 = math_max_s32(vout1, vmagic_min);
vout0 = math_min_s32(vout0, vmagic_max);
vout1 = math_min_s32(vout1, vmagic_max);
vout0 -= vmagic_bias_less_zero_point;
vout1 -= vmagic_bias_less_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = b[0];
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const float vscale = unaligned_load_f32((const void*) ((uintptr_t) w + 5 * sizeof(int8_t)));
float vfpacc = (float) vacc * vscale;
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc);
vout = math_max_s32(vout, vmagic_min);
vout = math_min_s32(vout, vmagic_max);
vout -= vmagic_bias_less_zero_point;
*output++ = (int8_t) vout;
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 13,963
| 32.168646
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-5f5m5l2c1s1r-minmax-fp32-scalar-lrintf.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_5f5m5l2c1s1r__scalar_lrintf(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
do {
const void* w = weights;
// First pass to process 5 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 10 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 5 * sizeof(int8_t));
*b++ = vacc;
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
w = (const void*) ((uintptr_t) w + 10 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = b[0];
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 5 * sizeof(int8_t));
*b++ = vacc;
}
}
// Last pass to process up to 5 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
b += 2;
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
w = (const void*) ((uintptr_t) w + 10 * sizeof(int8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
const float vscale0 = unaligned_indexed_load_f32(w, 0);
const float vscale1 = unaligned_indexed_load_f32(w, 1);
w = (const void*) ((const float*) w + 2);
vfpacc0 *= vscale0;
vfpacc1 *= vscale1;
vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
const int32_t vrndacc0 = (int32_t) lrintf(vfpacc0);
const int32_t vrndacc1 = (int32_t) lrintf(vfpacc1);
int32_t vout0 = (int32_t) vrndacc0 + voutput_zero_point;
int32_t vout1 = (int32_t) vrndacc1 + voutput_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = b[0];
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const float vscale = unaligned_load_f32((const void*) ((uintptr_t) w + 5 * sizeof(int8_t)));
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
const int32_t vrndacc = (int32_t) lrintf(vfpacc);
int32_t vout = vrndacc + voutput_zero_point;
*output++ = (int8_t) vout;
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 14,036
| 32.661871
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-5f5m5l2c1s1r-minmax-fp32-wasm-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_5f5m5l2c1s1r__wasm_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
do {
const void* w = weights;
// First pass to process 5 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 10 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 5 * sizeof(int8_t));
*b++ = vacc;
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
w = (const void*) ((uintptr_t) w + 10 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = b[0];
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 5 * sizeof(int8_t));
*b++ = vacc;
}
}
// Last pass to process up to 5 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
b += 2;
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
w = (const void*) ((uintptr_t) w + 10 * sizeof(int8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
const float vscale0 = unaligned_indexed_load_f32(w, 0);
const float vscale1 = unaligned_indexed_load_f32(w, 1);
w = (const void*) ((const float*) w + 2);
vfpacc0 *= vscale0;
vfpacc1 *= vscale1;
vfpacc0 = __builtin_wasm_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = __builtin_wasm_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc0 = __builtin_wasm_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = __builtin_wasm_min_f32(vfpacc1, voutput_max_less_zero_point);
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = b[0];
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const float vscale = unaligned_load_f32((const void*) ((uintptr_t) w + 5 * sizeof(int8_t)));
float vfpacc = (float) vacc * vscale;
vfpacc = __builtin_wasm_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = __builtin_wasm_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 14,198
| 33.05036
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-5f5m5l4c1s1r-minmax-fp32-scalar-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_5f5m5l4c1s1r__scalar_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
do {
const void* w = weights;
// First pass to process 5 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
for (; c >= 4; c -= 4) {
int32_t vacc0 = ((const int32_t*) w)[0];
int32_t vacc1 = ((const int32_t*) w)[1];
int32_t vacc2 = ((const int32_t*) w)[2];
int32_t vacc3 = ((const int32_t*) w)[3];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[1];
const int32_t vk0x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[2];
const int32_t vk0x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[3];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
const int32_t vi1x2 = (int32_t) i1[2];
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[4];
const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[5];
const int32_t vk1x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[6];
const int32_t vk1x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[7];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
const int32_t vi2x2 = (int32_t) i2[2];
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[8];
const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[9];
const int32_t vk2x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[10];
const int32_t vk2x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[11];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
const int32_t vi3x2 = (int32_t) i3[2];
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[12];
const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[13];
const int32_t vk3x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[14];
const int32_t vk3x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[15];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
const int32_t vi4x2 = (int32_t) i4[2];
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[16];
const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[17];
const int32_t vk4x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[18];
const int32_t vk4x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[19];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t) + 20 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b[2] = vacc2;
b[3] = vacc3;
b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32_t vacc = *((const int32_t*) w);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 5 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
for (; c >= 4; c -= 4) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
int32_t vacc2 = b[2];
int32_t vacc3 = b[3];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
const int32_t vk0x2 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk0x3 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
const int32_t vi1x2 = (int32_t) i1[2];
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[5];
const int32_t vk1x2 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk1x3 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
const int32_t vi2x2 = (int32_t) i2[2];
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[9];
const int32_t vk2x2 = (int32_t) ((const int8_t*) w)[10];
const int32_t vk2x3 = (int32_t) ((const int8_t*) w)[11];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
const int32_t vi3x2 = (int32_t) i3[2];
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[12];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[13];
const int32_t vk3x2 = (int32_t) ((const int8_t*) w)[14];
const int32_t vk3x3 = (int32_t) ((const int8_t*) w)[15];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
const int32_t vi4x2 = (int32_t) i4[2];
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[16];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[17];
const int32_t vk4x2 = (int32_t) ((const int8_t*) w)[18];
const int32_t vk4x3 = (int32_t) ((const int8_t*) w)[19];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
w = (const void*) ((uintptr_t) w + 20 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b[2] = vacc2;
b[3] = vacc3;
b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32_t vacc = *b;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 5 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
}
// Last pass to process up to 5 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
int32_t vacc2 = b[2];
int32_t vacc3 = b[3];
b += 4;
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
const int32_t vk0x2 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk0x3 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
const int32_t vi1x2 = (int32_t) i1[2];
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[5];
const int32_t vk1x2 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk1x3 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
const int32_t vi2x2 = (int32_t) i2[2];
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[9];
const int32_t vk2x2 = (int32_t) ((const int8_t*) w)[10];
const int32_t vk2x3 = (int32_t) ((const int8_t*) w)[11];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
const int32_t vi3x2 = (int32_t) i3[2];
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[12];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[13];
const int32_t vk3x2 = (int32_t) ((const int8_t*) w)[14];
const int32_t vk3x3 = (int32_t) ((const int8_t*) w)[15];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
const int32_t vi4x2 = (int32_t) i4[2];
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[16];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[17];
const int32_t vk4x2 = (int32_t) ((const int8_t*) w)[18];
const int32_t vk4x3 = (int32_t) ((const int8_t*) w)[19];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
w = (const void*) ((uintptr_t) w + 20 * sizeof(int8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
float vfpacc2 = (float) vacc2;
float vfpacc3 = (float) vacc3;
const float vscale0 = unaligned_indexed_load_f32(w, 0);
const float vscale1 = unaligned_indexed_load_f32(w, 1);
const float vscale2 = unaligned_indexed_load_f32(w, 2);
const float vscale3 = unaligned_indexed_load_f32(w, 3);
w = (const void*) ((const float*) w + 4);
vfpacc0 *= vscale0;
vfpacc1 *= vscale1;
vfpacc2 *= vscale2;
vfpacc3 *= vscale3;
vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc2 = math_max_f32(vfpacc2, voutput_min_less_zero_point);
vfpacc3 = math_max_f32(vfpacc3, voutput_min_less_zero_point);
vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
vfpacc2 = math_min_f32(vfpacc2, voutput_max_less_zero_point);
vfpacc3 = math_min_f32(vfpacc3, voutput_max_less_zero_point);
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
vfpacc2 += vmagic_bias;
vfpacc3 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
int32_t vout2 = (int32_t) float_as_uint32(vfpacc2) - vmagic_bias_less_output_zero_point;
int32_t vout3 = (int32_t) float_as_uint32(vfpacc3) - vmagic_bias_less_output_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output[2] = (int8_t) vout2;
output[3] = (int8_t) vout3;
output += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32_t vacc = *b++;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 5 * sizeof(int8_t));
const float vscale = unaligned_load_f32(w);
w = (const void*) ((const float*) w + 1);
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 20,317
| 36.625926
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-5f5m5l4c1s1r-minmax-fp32-scalar-imagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_5f5m5l4c1s1r__scalar_imagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
do {
const void* w = weights;
// First pass to process 5 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
for (; c >= 4; c -= 4) {
int32_t vacc0 = ((const int32_t*) w)[0];
int32_t vacc1 = ((const int32_t*) w)[1];
int32_t vacc2 = ((const int32_t*) w)[2];
int32_t vacc3 = ((const int32_t*) w)[3];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[1];
const int32_t vk0x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[2];
const int32_t vk0x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[3];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
const int32_t vi1x2 = (int32_t) i1[2];
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[4];
const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[5];
const int32_t vk1x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[6];
const int32_t vk1x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[7];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
const int32_t vi2x2 = (int32_t) i2[2];
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[8];
const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[9];
const int32_t vk2x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[10];
const int32_t vk2x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[11];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
const int32_t vi3x2 = (int32_t) i3[2];
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[12];
const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[13];
const int32_t vk3x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[14];
const int32_t vk3x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[15];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
const int32_t vi4x2 = (int32_t) i4[2];
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[16];
const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[17];
const int32_t vk4x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[18];
const int32_t vk4x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[19];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t) + 20 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b[2] = vacc2;
b[3] = vacc3;
b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32_t vacc = *((const int32_t*) w);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 5 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
for (; c >= 4; c -= 4) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
int32_t vacc2 = b[2];
int32_t vacc3 = b[3];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
const int32_t vk0x2 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk0x3 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
const int32_t vi1x2 = (int32_t) i1[2];
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[5];
const int32_t vk1x2 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk1x3 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
const int32_t vi2x2 = (int32_t) i2[2];
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[9];
const int32_t vk2x2 = (int32_t) ((const int8_t*) w)[10];
const int32_t vk2x3 = (int32_t) ((const int8_t*) w)[11];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
const int32_t vi3x2 = (int32_t) i3[2];
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[12];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[13];
const int32_t vk3x2 = (int32_t) ((const int8_t*) w)[14];
const int32_t vk3x3 = (int32_t) ((const int8_t*) w)[15];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
const int32_t vi4x2 = (int32_t) i4[2];
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[16];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[17];
const int32_t vk4x2 = (int32_t) ((const int8_t*) w)[18];
const int32_t vk4x3 = (int32_t) ((const int8_t*) w)[19];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
w = (const void*) ((uintptr_t) w + 20 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b[2] = vacc2;
b[3] = vacc3;
b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32_t vacc = *b;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 5 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
}
// Last pass to process up to 5 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
int32_t vacc2 = b[2];
int32_t vacc3 = b[3];
b += 4;
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
const int32_t vk0x2 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk0x3 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
const int32_t vi1x2 = (int32_t) i1[2];
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[5];
const int32_t vk1x2 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk1x3 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
const int32_t vi2x2 = (int32_t) i2[2];
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[9];
const int32_t vk2x2 = (int32_t) ((const int8_t*) w)[10];
const int32_t vk2x3 = (int32_t) ((const int8_t*) w)[11];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
const int32_t vi3x2 = (int32_t) i3[2];
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[12];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[13];
const int32_t vk3x2 = (int32_t) ((const int8_t*) w)[14];
const int32_t vk3x3 = (int32_t) ((const int8_t*) w)[15];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
const int32_t vi4x2 = (int32_t) i4[2];
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[16];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[17];
const int32_t vk4x2 = (int32_t) ((const int8_t*) w)[18];
const int32_t vk4x3 = (int32_t) ((const int8_t*) w)[19];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
w = (const void*) ((uintptr_t) w + 20 * sizeof(int8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
float vfpacc2 = (float) vacc2;
float vfpacc3 = (float) vacc3;
const float vscale0 = unaligned_indexed_load_f32(w, 0);
const float vscale1 = unaligned_indexed_load_f32(w, 1);
const float vscale2 = unaligned_indexed_load_f32(w, 2);
const float vscale3 = unaligned_indexed_load_f32(w, 3);
w = (const void*) ((const float*) w + 4);
vfpacc0 *= vscale0;
vfpacc1 *= vscale1;
vfpacc2 *= vscale2;
vfpacc3 *= vscale3;
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
vfpacc2 += vmagic_bias;
vfpacc3 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0);
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1);
int32_t vout2 = (int32_t) float_as_uint32(vfpacc2);
int32_t vout3 = (int32_t) float_as_uint32(vfpacc3);
vout0 = math_max_s32(vout0, vmagic_min);
vout1 = math_max_s32(vout1, vmagic_min);
vout2 = math_max_s32(vout2, vmagic_min);
vout3 = math_max_s32(vout3, vmagic_min);
vout0 = math_min_s32(vout0, vmagic_max);
vout1 = math_min_s32(vout1, vmagic_max);
vout2 = math_min_s32(vout2, vmagic_max);
vout3 = math_min_s32(vout3, vmagic_max);
vout0 -= vmagic_bias_less_zero_point;
vout1 -= vmagic_bias_less_zero_point;
vout2 -= vmagic_bias_less_zero_point;
vout3 -= vmagic_bias_less_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output[2] = (int8_t) vout2;
output[3] = (int8_t) vout3;
output += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32_t vacc = *b++;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 5 * sizeof(int8_t));
const float vscale = unaligned_load_f32(w);
w = (const void*) ((const float*) w + 1);
float vfpacc = (float) vacc * vscale;
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc);
vout = math_max_s32(vout, vmagic_min);
vout = math_min_s32(vout, vmagic_max);
vout -= vmagic_bias_less_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 20,076
| 35.771062
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-5f5m5l4c1s1r-minmax-fp32-scalar-lrintf.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_5f5m5l4c1s1r__scalar_lrintf(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
do {
const void* w = weights;
// First pass to process 5 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
for (; c >= 4; c -= 4) {
int32_t vacc0 = ((const int32_t*) w)[0];
int32_t vacc1 = ((const int32_t*) w)[1];
int32_t vacc2 = ((const int32_t*) w)[2];
int32_t vacc3 = ((const int32_t*) w)[3];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[1];
const int32_t vk0x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[2];
const int32_t vk0x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[3];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
const int32_t vi1x2 = (int32_t) i1[2];
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[4];
const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[5];
const int32_t vk1x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[6];
const int32_t vk1x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[7];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
const int32_t vi2x2 = (int32_t) i2[2];
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[8];
const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[9];
const int32_t vk2x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[10];
const int32_t vk2x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[11];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
const int32_t vi3x2 = (int32_t) i3[2];
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[12];
const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[13];
const int32_t vk3x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[14];
const int32_t vk3x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[15];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
const int32_t vi4x2 = (int32_t) i4[2];
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[16];
const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[17];
const int32_t vk4x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[18];
const int32_t vk4x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[19];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t) + 20 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b[2] = vacc2;
b[3] = vacc3;
b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32_t vacc = *((const int32_t*) w);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 5 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
for (; c >= 4; c -= 4) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
int32_t vacc2 = b[2];
int32_t vacc3 = b[3];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
const int32_t vk0x2 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk0x3 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
const int32_t vi1x2 = (int32_t) i1[2];
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[5];
const int32_t vk1x2 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk1x3 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
const int32_t vi2x2 = (int32_t) i2[2];
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[9];
const int32_t vk2x2 = (int32_t) ((const int8_t*) w)[10];
const int32_t vk2x3 = (int32_t) ((const int8_t*) w)[11];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
const int32_t vi3x2 = (int32_t) i3[2];
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[12];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[13];
const int32_t vk3x2 = (int32_t) ((const int8_t*) w)[14];
const int32_t vk3x3 = (int32_t) ((const int8_t*) w)[15];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
const int32_t vi4x2 = (int32_t) i4[2];
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[16];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[17];
const int32_t vk4x2 = (int32_t) ((const int8_t*) w)[18];
const int32_t vk4x3 = (int32_t) ((const int8_t*) w)[19];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
w = (const void*) ((uintptr_t) w + 20 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b[2] = vacc2;
b[3] = vacc3;
b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32_t vacc = *b;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 5 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
}
// Last pass to process up to 5 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
int32_t vacc2 = b[2];
int32_t vacc3 = b[3];
b += 4;
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
const int32_t vk0x2 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk0x3 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
const int32_t vi1x2 = (int32_t) i1[2];
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[5];
const int32_t vk1x2 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk1x3 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
const int32_t vi2x2 = (int32_t) i2[2];
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[9];
const int32_t vk2x2 = (int32_t) ((const int8_t*) w)[10];
const int32_t vk2x3 = (int32_t) ((const int8_t*) w)[11];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
const int32_t vi3x2 = (int32_t) i3[2];
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[12];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[13];
const int32_t vk3x2 = (int32_t) ((const int8_t*) w)[14];
const int32_t vk3x3 = (int32_t) ((const int8_t*) w)[15];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
const int32_t vi4x2 = (int32_t) i4[2];
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[16];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[17];
const int32_t vk4x2 = (int32_t) ((const int8_t*) w)[18];
const int32_t vk4x3 = (int32_t) ((const int8_t*) w)[19];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
w = (const void*) ((uintptr_t) w + 20 * sizeof(int8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
float vfpacc2 = (float) vacc2;
float vfpacc3 = (float) vacc3;
const float vscale0 = unaligned_indexed_load_f32(w, 0);
const float vscale1 = unaligned_indexed_load_f32(w, 1);
const float vscale2 = unaligned_indexed_load_f32(w, 2);
const float vscale3 = unaligned_indexed_load_f32(w, 3);
w = (const void*) ((const float*) w + 4);
vfpacc0 *= vscale0;
vfpacc1 *= vscale1;
vfpacc2 *= vscale2;
vfpacc3 *= vscale3;
vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc2 = math_max_f32(vfpacc2, voutput_min_less_zero_point);
vfpacc3 = math_max_f32(vfpacc3, voutput_min_less_zero_point);
vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
vfpacc2 = math_min_f32(vfpacc2, voutput_max_less_zero_point);
vfpacc3 = math_min_f32(vfpacc3, voutput_max_less_zero_point);
const int32_t vrndacc0 = (int32_t) lrintf(vfpacc0);
const int32_t vrndacc1 = (int32_t) lrintf(vfpacc1);
const int32_t vrndacc2 = (int32_t) lrintf(vfpacc2);
const int32_t vrndacc3 = (int32_t) lrintf(vfpacc3);
int32_t vout0 = (int32_t) vrndacc0 + voutput_zero_point;
int32_t vout1 = (int32_t) vrndacc1 + voutput_zero_point;
int32_t vout2 = (int32_t) vrndacc2 + voutput_zero_point;
int32_t vout3 = (int32_t) vrndacc3 + voutput_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output[2] = (int8_t) vout2;
output[3] = (int8_t) vout3;
output += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32_t vacc = *b++;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 5 * sizeof(int8_t));
const float vscale = unaligned_load_f32(w);
w = (const void*) ((const float*) w + 1);
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
const int32_t vrndacc = (int32_t) lrintf(vfpacc);
int32_t vout = vrndacc + voutput_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 20,205
| 36.418519
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-5f5m5l4c1s1r-minmax-fp32-wasm-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_5f5m5l4c1s1r__wasm_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
do {
const void* w = weights;
// First pass to process 5 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
for (; c >= 4; c -= 4) {
int32_t vacc0 = ((const int32_t*) w)[0];
int32_t vacc1 = ((const int32_t*) w)[1];
int32_t vacc2 = ((const int32_t*) w)[2];
int32_t vacc3 = ((const int32_t*) w)[3];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[1];
const int32_t vk0x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[2];
const int32_t vk0x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[3];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
const int32_t vi1x2 = (int32_t) i1[2];
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[4];
const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[5];
const int32_t vk1x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[6];
const int32_t vk1x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[7];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
const int32_t vi2x2 = (int32_t) i2[2];
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[8];
const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[9];
const int32_t vk2x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[10];
const int32_t vk2x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[11];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
const int32_t vi3x2 = (int32_t) i3[2];
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[12];
const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[13];
const int32_t vk3x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[14];
const int32_t vk3x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[15];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
const int32_t vi4x2 = (int32_t) i4[2];
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[16];
const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[17];
const int32_t vk4x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[18];
const int32_t vk4x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[19];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t) + 20 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b[2] = vacc2;
b[3] = vacc3;
b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32_t vacc = *((const int32_t*) w);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 5 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
for (; c >= 4; c -= 4) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
int32_t vacc2 = b[2];
int32_t vacc3 = b[3];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
const int32_t vk0x2 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk0x3 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
const int32_t vi1x2 = (int32_t) i1[2];
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[5];
const int32_t vk1x2 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk1x3 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
const int32_t vi2x2 = (int32_t) i2[2];
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[9];
const int32_t vk2x2 = (int32_t) ((const int8_t*) w)[10];
const int32_t vk2x3 = (int32_t) ((const int8_t*) w)[11];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
const int32_t vi3x2 = (int32_t) i3[2];
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[12];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[13];
const int32_t vk3x2 = (int32_t) ((const int8_t*) w)[14];
const int32_t vk3x3 = (int32_t) ((const int8_t*) w)[15];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
const int32_t vi4x2 = (int32_t) i4[2];
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[16];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[17];
const int32_t vk4x2 = (int32_t) ((const int8_t*) w)[18];
const int32_t vk4x3 = (int32_t) ((const int8_t*) w)[19];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
w = (const void*) ((uintptr_t) w + 20 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b[2] = vacc2;
b[3] = vacc3;
b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32_t vacc = *b;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 5 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
}
// Last pass to process up to 5 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
int32_t vacc2 = b[2];
int32_t vacc3 = b[3];
b += 4;
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
const int32_t vk0x2 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk0x3 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
const int32_t vi1x2 = (int32_t) i1[2];
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[5];
const int32_t vk1x2 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk1x3 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
const int32_t vi2x2 = (int32_t) i2[2];
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[9];
const int32_t vk2x2 = (int32_t) ((const int8_t*) w)[10];
const int32_t vk2x3 = (int32_t) ((const int8_t*) w)[11];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
const int32_t vi3x2 = (int32_t) i3[2];
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[12];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[13];
const int32_t vk3x2 = (int32_t) ((const int8_t*) w)[14];
const int32_t vk3x3 = (int32_t) ((const int8_t*) w)[15];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
const int32_t vi4x2 = (int32_t) i4[2];
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[16];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[17];
const int32_t vk4x2 = (int32_t) ((const int8_t*) w)[18];
const int32_t vk4x3 = (int32_t) ((const int8_t*) w)[19];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
w = (const void*) ((uintptr_t) w + 20 * sizeof(int8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
float vfpacc2 = (float) vacc2;
float vfpacc3 = (float) vacc3;
const float vscale0 = unaligned_indexed_load_f32(w, 0);
const float vscale1 = unaligned_indexed_load_f32(w, 1);
const float vscale2 = unaligned_indexed_load_f32(w, 2);
const float vscale3 = unaligned_indexed_load_f32(w, 3);
w = (const void*) ((const float*) w + 4);
vfpacc0 *= vscale0;
vfpacc1 *= vscale1;
vfpacc2 *= vscale2;
vfpacc3 *= vscale3;
vfpacc0 = __builtin_wasm_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = __builtin_wasm_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc2 = __builtin_wasm_max_f32(vfpacc2, voutput_min_less_zero_point);
vfpacc3 = __builtin_wasm_max_f32(vfpacc3, voutput_min_less_zero_point);
vfpacc0 = __builtin_wasm_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = __builtin_wasm_min_f32(vfpacc1, voutput_max_less_zero_point);
vfpacc2 = __builtin_wasm_min_f32(vfpacc2, voutput_max_less_zero_point);
vfpacc3 = __builtin_wasm_min_f32(vfpacc3, voutput_max_less_zero_point);
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
vfpacc2 += vmagic_bias;
vfpacc3 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
int32_t vout2 = (int32_t) float_as_uint32(vfpacc2) - vmagic_bias_less_output_zero_point;
int32_t vout3 = (int32_t) float_as_uint32(vfpacc3) - vmagic_bias_less_output_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output[2] = (int8_t) vout2;
output[3] = (int8_t) vout3;
output += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32_t vacc = *b++;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 5 * sizeof(int8_t));
const float vscale = unaligned_load_f32(w);
w = (const void*) ((const float*) w + 1);
float vfpacc = (float) vacc * vscale;
vfpacc = __builtin_wasm_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = __builtin_wasm_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 20,415
| 36.807407
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-5f5m5l8c8s8r-minmax-fp32-neon-mla8-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-neon-mul8.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_5f5m5l8c8s8r__neon_mla8_ld64(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->fp32_neon.magic_bias);
const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(¶ms->fp32_neon.magic_bias_less_output_zero_point);
const int8x8_t voutput_min = vld1_dup_s8(¶ms->fp32_neon.output_min);
const int8x8_t voutput_max = vld1_dup_s8(¶ms->fp32_neon.output_max);
do {
const void* w = weights;
// First pass to process 5 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
assert(c == 0);
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
assert(c == 0);
}
// Last pass to process up to 5 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const float*) w + 4;
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const float*) w + 4;
vfpacc0123 = vmulq_f32(vfpacc0123, vscale0123);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale4567);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#endif // !XNN_ARCH_ARM64
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
vst1_s8(output, vout01234567); output += 8;
}
if XNN_UNLIKELY(c != 0) {
{
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int8x8_t vi0x01234567 = vld1_s8(i0);
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
const int8x8_t vi1x01234567 = vld1_s8(i1);
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2);
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3);
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4);
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const float*) w + 4;
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const float*) w + 4;
vfpacc0123 = vmulq_f32(vfpacc0123, vscale0123);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale4567);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
if (c & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (c & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (c & 1) {
vst1_lane_s8(output, vout01234567, 0); output += 1;
}
}
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 14,119
| 35.298201
| 123
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-5f5m5l8c8s8r-minmax-fp32-neon-mul16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-neon-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_5f5m5l8c8s8r__neon_mul16(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->fp32_neon.magic_bias);
const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(¶ms->fp32_neon.magic_bias_less_output_zero_point);
const int8x8_t voutput_min = vld1_dup_s8(¶ms->fp32_neon.output_min);
const int8x8_t voutput_max = vld1_dup_s8(¶ms->fp32_neon.output_max);
do {
const void* w = weights;
// First pass to process 5 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
assert(c == 0);
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
assert(c == 0);
}
// Last pass to process up to 5 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const float*) w + 4;
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const float*) w + 4;
vfpacc0123 = vmulq_f32(vfpacc0123, vscale0123);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale4567);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#endif // !XNN_ARCH_ARM64
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
vst1_s8(output, vout01234567); output += 8;
}
if XNN_UNLIKELY(c != 0) {
{
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0));
const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1));
const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2));
const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3));
const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4));
const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const float*) w + 4;
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const float*) w + 4;
vfpacc0123 = vmulq_f32(vfpacc0123, vscale0123);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale4567);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
if (c & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (c & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (c & 1) {
vst1_lane_s8(output, vout01234567, 0); output += 1;
}
}
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 15,356
| 40.617886
| 123
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-5f5m5l8c8s8r-minmax-fp32-neon-mul8-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-neon-mul8.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_5f5m5l8c8s8r__neon_mul8_ld64(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->fp32_neon.magic_bias);
const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(¶ms->fp32_neon.magic_bias_less_output_zero_point);
const int8x8_t voutput_min = vld1_dup_s8(¶ms->fp32_neon.output_min);
const int8x8_t voutput_max = vld1_dup_s8(¶ms->fp32_neon.output_max);
do {
const void* w = weights;
// First pass to process 5 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
assert(c == 0);
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
assert(c == 0);
}
// Last pass to process up to 5 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const float*) w + 4;
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const float*) w + 4;
vfpacc0123 = vmulq_f32(vfpacc0123, vscale0123);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale4567);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#endif // !XNN_ARCH_ARM64
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
vst1_s8(output, vout01234567); output += 8;
}
if XNN_UNLIKELY(c != 0) {
{
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int8x8_t vi0x01234567 = vld1_s8(i0);
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi1x01234567 = vld1_s8(i1);
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2);
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi3x01234567 = vld1_s8(i3);
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4);
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const float*) w + 4;
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const float*) w + 4;
vfpacc0123 = vmulq_f32(vfpacc0123, vscale0123);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale4567);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
if (c & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (c & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (c & 1) {
vst1_lane_s8(output, vout01234567, 0); output += 1;
}
}
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 15,119
| 36.333333
| 123
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-5f5m5l8c8s8r-minmax-fp32-neonv8-mla8-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-neon-mul8.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_5f5m5l8c8s8r__neonv8_mla8_ld64(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->fp32_neonv8.output_zero_point);
const int8x8_t voutput_min = vld1_dup_s8(¶ms->fp32_neonv8.output_min);
const int8x8_t voutput_max = vld1_dup_s8(¶ms->fp32_neonv8.output_max);
do {
const void* w = weights;
// First pass to process 5 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
assert(c == 0);
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
assert(c == 0);
}
// Last pass to process up to 5 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const float*) w + 4;
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const float*) w + 4;
vfpacc0123 = vmulq_f32(vfpacc0123, vscale0123);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale4567);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#endif // !XNN_ARCH_ARM64
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
vst1_s8(output, vout01234567); output += 8;
}
if XNN_UNLIKELY(c != 0) {
{
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int8x8_t vi0x01234567 = vld1_s8(i0);
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
const int8x8_t vi1x01234567 = vld1_s8(i1);
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2);
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3);
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4);
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const float*) w + 4;
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const float*) w + 4;
vfpacc0123 = vmulq_f32(vfpacc0123, vscale0123);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale4567);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
if (c & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (c & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (c & 1) {
vst1_lane_s8(output, vout01234567, 0); output += 1;
}
}
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 13,827
| 34.823834
| 99
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-5f5m5l8c8s8r-minmax-fp32-neonv8-mul16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-neon-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_5f5m5l8c8s8r__neonv8_mul16(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->fp32_neonv8.output_zero_point);
const int8x8_t voutput_min = vld1_dup_s8(¶ms->fp32_neonv8.output_min);
const int8x8_t voutput_max = vld1_dup_s8(¶ms->fp32_neonv8.output_max);
do {
const void* w = weights;
// First pass to process 5 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
assert(c == 0);
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
assert(c == 0);
}
// Last pass to process up to 5 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const float*) w + 4;
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const float*) w + 4;
vfpacc0123 = vmulq_f32(vfpacc0123, vscale0123);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale4567);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#endif // !XNN_ARCH_ARM64
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
vst1_s8(output, vout01234567); output += 8;
}
if XNN_UNLIKELY(c != 0) {
{
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0));
const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1));
const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2));
const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3));
const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4));
const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const float*) w + 4;
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const float*) w + 4;
vfpacc0123 = vmulq_f32(vfpacc0123, vscale0123);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale4567);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
if (c & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (c & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (c & 1) {
vst1_lane_s8(output, vout01234567, 0); output += 1;
}
}
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 15,064
| 40.161202
| 99
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-5f5m5l8c8s8r-minmax-fp32-neonv8-mul8-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-neon-mul8.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_5f5m5l8c8s8r__neonv8_mul8_ld64(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->fp32_neonv8.output_zero_point);
const int8x8_t voutput_min = vld1_dup_s8(¶ms->fp32_neonv8.output_min);
const int8x8_t voutput_max = vld1_dup_s8(¶ms->fp32_neonv8.output_max);
do {
const void* w = weights;
// First pass to process 5 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
assert(c == 0);
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
assert(c == 0);
}
// Last pass to process up to 5 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const float*) w + 4;
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const float*) w + 4;
vfpacc0123 = vmulq_f32(vfpacc0123, vscale0123);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale4567);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#endif // !XNN_ARCH_ARM64
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
vst1_s8(output, vout01234567); output += 8;
}
if XNN_UNLIKELY(c != 0) {
{
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int8x8_t vi0x01234567 = vld1_s8(i0);
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi1x01234567 = vld1_s8(i1);
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2);
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi3x01234567 = vld1_s8(i3);
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4);
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const float*) w + 4;
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const float*) w + 4;
vfpacc0123 = vmulq_f32(vfpacc0123, vscale0123);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale4567);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
if (c & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (c & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (c & 1) {
vst1_lane_s8(output, vout01234567, 0); output += 1;
}
}
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 14,827
| 35.885572
| 99
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-5f5m5l8c8s8r-minmax-fp32-sse2-mul16-add16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-sse-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_5f5m5l8c8s8r__sse2_mul16_add16(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
do {
const void* w = weights;
// First pass to process 5 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
__m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(int8_t)));
i0 += 8;
const __m128i vxi0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi0x01234567, vi0x01234567), 8);
const __m128i vxk0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk0x01234567, vk0x01234567), 8);
__m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(int8_t)));
i1 += 8;
const __m128i vxi1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi1x01234567, vi1x01234567), 8);
const __m128i vxk1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk1x01234567, vk1x01234567), 8);
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi1x01234567, vxk1x01234567));
const __m128i vsignprod1x01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vprod01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod01234567, vsignprod1x01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod01234567, vsignprod1x01234567));
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(int8_t)));
i2 += 8;
const __m128i vxi2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi2x01234567, vi2x01234567), 8);
const __m128i vxk2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk2x01234567, vk2x01234567), 8);
vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(int8_t)));
i3 += 8;
const __m128i vxi3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi3x01234567, vi3x01234567), 8);
const __m128i vxk3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk3x01234567, vk3x01234567), 8);
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi3x01234567, vxk3x01234567));
const __m128i vsignprod3x01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vprod01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod01234567, vsignprod3x01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod01234567, vsignprod3x01234567));
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(int8_t)));
i4 += 8;
const __m128i vxi4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi4x01234567, vi4x01234567), 8);
const __m128i vxk4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk4x01234567, vk4x01234567), 8);
vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
const __m128i vsignprod4x01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vprod01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod01234567, vsignprod4x01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod01234567, vsignprod4x01234567));
w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(int8_t));
_mm_storeu_si128((__m128i*) b, vacc0123);
_mm_storeu_si128((__m128i*) (b + 4), vacc4567);
b += 8;
}
assert(c == 0);
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
__m128i vacc0123 = _mm_loadu_si128((const __m128i*) b);
__m128i vacc4567 = _mm_loadu_si128((const __m128i*) (b + 4));
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 0 * sizeof(int8_t)));
i0 += 8;
const __m128i vxi0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi0x01234567, vi0x01234567), 8);
const __m128i vxk0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk0x01234567, vk0x01234567), 8);
__m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int8_t)));
i1 += 8;
const __m128i vxi1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi1x01234567, vi1x01234567), 8);
const __m128i vxk1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk1x01234567, vk1x01234567), 8);
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi1x01234567, vxk1x01234567));
const __m128i vsignprod1x01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vprod01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod01234567, vsignprod1x01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod01234567, vsignprod1x01234567));
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int8_t)));
i2 += 8;
const __m128i vxi2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi2x01234567, vi2x01234567), 8);
const __m128i vxk2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk2x01234567, vk2x01234567), 8);
vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int8_t)));
i3 += 8;
const __m128i vxi3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi3x01234567, vi3x01234567), 8);
const __m128i vxk3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk3x01234567, vk3x01234567), 8);
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi3x01234567, vxk3x01234567));
const __m128i vsignprod3x01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vprod01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod01234567, vsignprod3x01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod01234567, vsignprod3x01234567));
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int8_t)));
i4 += 8;
const __m128i vxi4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi4x01234567, vi4x01234567), 8);
const __m128i vxk4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk4x01234567, vk4x01234567), 8);
vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
const __m128i vsignprod4x01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vprod01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod01234567, vsignprod4x01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod01234567, vsignprod4x01234567));
w = (const void*) ((uintptr_t) w + 40 * sizeof(int8_t));
_mm_storeu_si128((__m128i*) (b), vacc0123);
_mm_storeu_si128((__m128i*) (b + 4), vacc4567);
b += 8;
}
assert(c == 0);
}
// Last pass to process up to 5 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
__m128i vacc0123 = _mm_loadu_si128((const __m128i*) b);
__m128i vacc4567 = _mm_loadu_si128((const __m128i*) (b + 4));
b += 8;
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 0 * sizeof(int8_t)));
i0 += 8;
const __m128i vxi0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi0x01234567, vi0x01234567), 8);
const __m128i vxk0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk0x01234567, vk0x01234567), 8);
__m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int8_t)));
i1 += 8;
const __m128i vxi1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi1x01234567, vi1x01234567), 8);
const __m128i vxk1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk1x01234567, vk1x01234567), 8);
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi1x01234567, vxk1x01234567));
const __m128i vsignprod1x01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vprod01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod01234567, vsignprod1x01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod01234567, vsignprod1x01234567));
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int8_t)));
i2 += 8;
const __m128i vxi2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi2x01234567, vi2x01234567), 8);
const __m128i vxk2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk2x01234567, vk2x01234567), 8);
vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int8_t)));
i3 += 8;
const __m128i vxi3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi3x01234567, vi3x01234567), 8);
const __m128i vxk3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk3x01234567, vk3x01234567), 8);
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi3x01234567, vxk3x01234567));
const __m128i vsignprod3x01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vprod01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod01234567, vsignprod3x01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod01234567, vsignprod3x01234567));
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int8_t)));
i4 += 8;
const __m128i vxi4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi4x01234567, vi4x01234567), 8);
const __m128i vxk4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk4x01234567, vk4x01234567), 8);
vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
const __m128i vsignprod4x01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vprod01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod01234567, vsignprod4x01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod01234567, vsignprod4x01234567));
w = (const void*) ((uintptr_t) w + 40 * sizeof(int8_t));
__m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
const __m128 vscale4567 = _mm_loadu_ps((const float*) w + 4);
w = (const void*) ((const float*) w + 8);
vscaled0123 = _mm_mul_ps(vscaled0123, vscale0123);
vscaled4567 = _mm_mul_ps(vscaled4567, vscale4567);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vscaled0123);
vacc4567 = _mm_cvtps_epi32(vscaled4567);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
vout01234567 = _mm_max_epi16(vout01234567, voutput_min);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
}
if XNN_UNLIKELY(c != 0) {
{
__m128i vacc0123 = _mm_loadu_si128((const __m128i*) b);
__m128i vacc4567 = _mm_loadu_si128((const __m128i*) (b + 4));
b += 8;
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 0 * sizeof(int8_t)));
const __m128i vxi0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi0x01234567, vi0x01234567), 8);
const __m128i vxk0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk0x01234567, vk0x01234567), 8);
__m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int8_t)));
const __m128i vxi1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi1x01234567, vi1x01234567), 8);
const __m128i vxk1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk1x01234567, vk1x01234567), 8);
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi1x01234567, vxk1x01234567));
const __m128i vsignprod1x01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vprod01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod01234567, vsignprod1x01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod01234567, vsignprod1x01234567));
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int8_t)));
const __m128i vxi2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi2x01234567, vi2x01234567), 8);
const __m128i vxk2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk2x01234567, vk2x01234567), 8);
vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int8_t)));
const __m128i vxi3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi3x01234567, vi3x01234567), 8);
const __m128i vxk3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk3x01234567, vk3x01234567), 8);
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi3x01234567, vxk3x01234567));
const __m128i vsignprod3x01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vprod01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod01234567, vsignprod3x01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod01234567, vsignprod3x01234567));
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int8_t)));
const __m128i vxi4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi4x01234567, vi4x01234567), 8);
const __m128i vxk4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk4x01234567, vk4x01234567), 8);
vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
const __m128i vsignprod4x01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vprod01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod01234567, vsignprod4x01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod01234567, vsignprod4x01234567));
__m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);
const __m128 vscale0123 = _mm_loadu_ps((const float*) ((uintptr_t) w + 40 * sizeof(int8_t)));
const __m128 vscale4567 = _mm_loadu_ps((const float*) ((uintptr_t) w + 40 * sizeof(int8_t) + 4 * sizeof(float)));
vscaled0123 = _mm_mul_ps(vscaled0123, vscale0123);
vscaled4567 = _mm_mul_ps(vscaled4567, vscale4567);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vscaled0123);
vacc4567 = _mm_cvtps_epi32(vscaled4567);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
vout01234567 = _mm_max_epi16(vout01234567, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
if (c & 4) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (c & 2) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (c & 1) {
*output = (int8_t) _mm_cvtsi128_si32(vout0123456701234567);
output += 1;
}
}
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 22,318
| 44.642127
| 131
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-5f5m5l8c8s8r-minmax-fp32-sse2-mul16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-sse-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_5f5m5l8c8s8r__sse2_mul16(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
do {
const void* w = weights;
// First pass to process 5 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
__m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(int8_t)));
i0 += 8;
const __m128i vxi0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi0x01234567, vi0x01234567), 8);
const __m128i vxk0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk0x01234567, vk0x01234567), 8);
const __m128i vprod0x01234567lo = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
const __m128i vprod0x01234567hi = _mm_mulhi_epi16(vxi0x01234567, vxk0x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod0x01234567lo, vprod0x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod0x01234567lo, vprod0x01234567hi));
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(int8_t)));
i1 += 8;
const __m128i vxi1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi1x01234567, vi1x01234567), 8);
const __m128i vxk1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk1x01234567, vk1x01234567), 8);
const __m128i vprod1x01234567lo = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
const __m128i vprod1x01234567hi = _mm_mulhi_epi16(vxi1x01234567, vxk1x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod1x01234567lo, vprod1x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod1x01234567lo, vprod1x01234567hi));
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(int8_t)));
i2 += 8;
const __m128i vxi2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi2x01234567, vi2x01234567), 8);
const __m128i vxk2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk2x01234567, vk2x01234567), 8);
const __m128i vprod2x01234567lo = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
const __m128i vprod2x01234567hi = _mm_mulhi_epi16(vxi2x01234567, vxk2x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod2x01234567lo, vprod2x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod2x01234567lo, vprod2x01234567hi));
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(int8_t)));
i3 += 8;
const __m128i vxi3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi3x01234567, vi3x01234567), 8);
const __m128i vxk3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk3x01234567, vk3x01234567), 8);
const __m128i vprod3x01234567lo = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
const __m128i vprod3x01234567hi = _mm_mulhi_epi16(vxi3x01234567, vxk3x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod3x01234567lo, vprod3x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod3x01234567lo, vprod3x01234567hi));
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(int8_t)));
i4 += 8;
const __m128i vxi4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi4x01234567, vi4x01234567), 8);
const __m128i vxk4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk4x01234567, vk4x01234567), 8);
const __m128i vprod4x01234567lo = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
const __m128i vprod4x01234567hi = _mm_mulhi_epi16(vxi4x01234567, vxk4x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod4x01234567lo, vprod4x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod4x01234567lo, vprod4x01234567hi));
w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(int8_t));
_mm_storeu_si128((__m128i*) b, vacc0123);
_mm_storeu_si128((__m128i*) (b + 4), vacc4567);
b += 8;
}
assert(c == 0);
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
__m128i vacc0123 = _mm_loadu_si128((const __m128i*) b);
__m128i vacc4567 = _mm_loadu_si128((const __m128i*) (b + 4));
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 0 * sizeof(int8_t)));
i0 += 8;
const __m128i vxi0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi0x01234567, vi0x01234567), 8);
const __m128i vxk0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk0x01234567, vk0x01234567), 8);
const __m128i vprod0x01234567lo = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
const __m128i vprod0x01234567hi = _mm_mulhi_epi16(vxi0x01234567, vxk0x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod0x01234567lo, vprod0x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod0x01234567lo, vprod0x01234567hi));
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int8_t)));
i1 += 8;
const __m128i vxi1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi1x01234567, vi1x01234567), 8);
const __m128i vxk1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk1x01234567, vk1x01234567), 8);
const __m128i vprod1x01234567lo = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
const __m128i vprod1x01234567hi = _mm_mulhi_epi16(vxi1x01234567, vxk1x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod1x01234567lo, vprod1x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod1x01234567lo, vprod1x01234567hi));
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int8_t)));
i2 += 8;
const __m128i vxi2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi2x01234567, vi2x01234567), 8);
const __m128i vxk2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk2x01234567, vk2x01234567), 8);
const __m128i vprod2x01234567lo = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
const __m128i vprod2x01234567hi = _mm_mulhi_epi16(vxi2x01234567, vxk2x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod2x01234567lo, vprod2x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod2x01234567lo, vprod2x01234567hi));
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int8_t)));
i3 += 8;
const __m128i vxi3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi3x01234567, vi3x01234567), 8);
const __m128i vxk3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk3x01234567, vk3x01234567), 8);
const __m128i vprod3x01234567lo = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
const __m128i vprod3x01234567hi = _mm_mulhi_epi16(vxi3x01234567, vxk3x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod3x01234567lo, vprod3x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod3x01234567lo, vprod3x01234567hi));
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int8_t)));
i4 += 8;
const __m128i vxi4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi4x01234567, vi4x01234567), 8);
const __m128i vxk4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk4x01234567, vk4x01234567), 8);
const __m128i vprod4x01234567lo = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
const __m128i vprod4x01234567hi = _mm_mulhi_epi16(vxi4x01234567, vxk4x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod4x01234567lo, vprod4x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod4x01234567lo, vprod4x01234567hi));
w = (const void*) ((uintptr_t) w + 40 * sizeof(int8_t));
_mm_storeu_si128((__m128i*) (b), vacc0123);
_mm_storeu_si128((__m128i*) (b + 4), vacc4567);
b += 8;
}
assert(c == 0);
}
// Last pass to process up to 5 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
__m128i vacc0123 = _mm_loadu_si128((const __m128i*) b);
__m128i vacc4567 = _mm_loadu_si128((const __m128i*) (b + 4));
b += 8;
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 0 * sizeof(int8_t)));
i0 += 8;
const __m128i vxi0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi0x01234567, vi0x01234567), 8);
const __m128i vxk0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk0x01234567, vk0x01234567), 8);
const __m128i vprod0x01234567lo = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
const __m128i vprod0x01234567hi = _mm_mulhi_epi16(vxi0x01234567, vxk0x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod0x01234567lo, vprod0x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod0x01234567lo, vprod0x01234567hi));
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int8_t)));
i1 += 8;
const __m128i vxi1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi1x01234567, vi1x01234567), 8);
const __m128i vxk1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk1x01234567, vk1x01234567), 8);
const __m128i vprod1x01234567lo = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
const __m128i vprod1x01234567hi = _mm_mulhi_epi16(vxi1x01234567, vxk1x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod1x01234567lo, vprod1x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod1x01234567lo, vprod1x01234567hi));
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int8_t)));
i2 += 8;
const __m128i vxi2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi2x01234567, vi2x01234567), 8);
const __m128i vxk2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk2x01234567, vk2x01234567), 8);
const __m128i vprod2x01234567lo = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
const __m128i vprod2x01234567hi = _mm_mulhi_epi16(vxi2x01234567, vxk2x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod2x01234567lo, vprod2x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod2x01234567lo, vprod2x01234567hi));
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int8_t)));
i3 += 8;
const __m128i vxi3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi3x01234567, vi3x01234567), 8);
const __m128i vxk3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk3x01234567, vk3x01234567), 8);
const __m128i vprod3x01234567lo = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
const __m128i vprod3x01234567hi = _mm_mulhi_epi16(vxi3x01234567, vxk3x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod3x01234567lo, vprod3x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod3x01234567lo, vprod3x01234567hi));
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int8_t)));
i4 += 8;
const __m128i vxi4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi4x01234567, vi4x01234567), 8);
const __m128i vxk4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk4x01234567, vk4x01234567), 8);
const __m128i vprod4x01234567lo = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
const __m128i vprod4x01234567hi = _mm_mulhi_epi16(vxi4x01234567, vxk4x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod4x01234567lo, vprod4x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod4x01234567lo, vprod4x01234567hi));
w = (const void*) ((uintptr_t) w + 40 * sizeof(int8_t));
__m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
const __m128 vscale4567 = _mm_loadu_ps((const float*) w + 4);
w = (const void*) ((const float*) w + 8);
vscaled0123 = _mm_mul_ps(vscaled0123, vscale0123);
vscaled4567 = _mm_mul_ps(vscaled4567, vscale4567);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vscaled0123);
vacc4567 = _mm_cvtps_epi32(vscaled4567);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
vout01234567 = _mm_max_epi16(vout01234567, voutput_min);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
}
if XNN_UNLIKELY(c != 0) {
{
__m128i vacc0123 = _mm_loadu_si128((const __m128i*) b);
__m128i vacc4567 = _mm_loadu_si128((const __m128i*) (b + 4));
b += 8;
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 0 * sizeof(int8_t)));
const __m128i vxi0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi0x01234567, vi0x01234567), 8);
const __m128i vxk0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk0x01234567, vk0x01234567), 8);
const __m128i vprod0x01234567lo = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
const __m128i vprod0x01234567hi = _mm_mulhi_epi16(vxi0x01234567, vxk0x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod0x01234567lo, vprod0x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod0x01234567lo, vprod0x01234567hi));
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int8_t)));
const __m128i vxi1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi1x01234567, vi1x01234567), 8);
const __m128i vxk1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk1x01234567, vk1x01234567), 8);
const __m128i vprod1x01234567lo = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
const __m128i vprod1x01234567hi = _mm_mulhi_epi16(vxi1x01234567, vxk1x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod1x01234567lo, vprod1x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod1x01234567lo, vprod1x01234567hi));
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int8_t)));
const __m128i vxi2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi2x01234567, vi2x01234567), 8);
const __m128i vxk2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk2x01234567, vk2x01234567), 8);
const __m128i vprod2x01234567lo = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
const __m128i vprod2x01234567hi = _mm_mulhi_epi16(vxi2x01234567, vxk2x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod2x01234567lo, vprod2x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod2x01234567lo, vprod2x01234567hi));
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int8_t)));
const __m128i vxi3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi3x01234567, vi3x01234567), 8);
const __m128i vxk3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk3x01234567, vk3x01234567), 8);
const __m128i vprod3x01234567lo = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
const __m128i vprod3x01234567hi = _mm_mulhi_epi16(vxi3x01234567, vxk3x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod3x01234567lo, vprod3x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod3x01234567lo, vprod3x01234567hi));
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int8_t)));
const __m128i vxi4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi4x01234567, vi4x01234567), 8);
const __m128i vxk4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk4x01234567, vk4x01234567), 8);
const __m128i vprod4x01234567lo = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
const __m128i vprod4x01234567hi = _mm_mulhi_epi16(vxi4x01234567, vxk4x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod4x01234567lo, vprod4x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod4x01234567lo, vprod4x01234567hi));
__m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);
const __m128 vscale0123 = _mm_loadu_ps((const float*) ((uintptr_t) w + 40 * sizeof(int8_t)));
const __m128 vscale4567 = _mm_loadu_ps((const float*) ((uintptr_t) w + 40 * sizeof(int8_t) + 4 * sizeof(float)));
vscaled0123 = _mm_mul_ps(vscaled0123, vscale0123);
vscaled4567 = _mm_mul_ps(vscaled4567, vscale4567);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vscaled0123);
vacc4567 = _mm_cvtps_epi32(vscaled4567);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
vout01234567 = _mm_max_epi16(vout01234567, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
if (c & 4) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (c & 2) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (c & 1) {
*output = (int8_t) _mm_cvtsi128_si32(vout0123456701234567);
output += 1;
}
}
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 24,708
| 47.165692
| 131
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-5f5m5l8c8s8r-minmax-fp32-sse41-mul16-add16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-sse-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_5f5m5l8c8s8r__sse41_mul16_add16(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
do {
const void* w = weights;
// First pass to process 5 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
__m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(int8_t)));
const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
i0 += 8;
__m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(int8_t)));
const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
i1 += 8;
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi1x01234567, vxk1x01234567));
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(int8_t)));
const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
i2 += 8;
vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(int8_t)));
const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
i3 += 8;
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi3x01234567, vxk3x01234567));
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(int8_t)));
const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
i4 += 8;
vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(int8_t));
_mm_storeu_si128((__m128i*) b, vacc0123);
_mm_storeu_si128((__m128i*) (b + 4), vacc4567);
b += 8;
}
assert(c == 0);
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
__m128i vacc0123 = _mm_loadu_si128((const __m128i*) b);
__m128i vacc4567 = _mm_loadu_si128((const __m128i*) (b + 4));
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 0 * sizeof(int8_t)));
const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
i0 += 8;
__m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int8_t)));
const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
i1 += 8;
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi1x01234567, vxk1x01234567));
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int8_t)));
const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
i2 += 8;
vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int8_t)));
const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
i3 += 8;
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi3x01234567, vxk3x01234567));
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int8_t)));
const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
i4 += 8;
vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
w = (const void*) ((uintptr_t) w + 40 * sizeof(int8_t));
_mm_storeu_si128((__m128i*) (b), vacc0123);
_mm_storeu_si128((__m128i*) (b + 4), vacc4567);
b += 8;
}
assert(c == 0);
}
// Last pass to process up to 5 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
__m128i vacc0123 = _mm_loadu_si128((const __m128i*) b);
__m128i vacc4567 = _mm_loadu_si128((const __m128i*) (b + 4));
b += 8;
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 0 * sizeof(int8_t)));
const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
i0 += 8;
__m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int8_t)));
const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
i1 += 8;
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi1x01234567, vxk1x01234567));
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int8_t)));
const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
i2 += 8;
vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int8_t)));
const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
i3 += 8;
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi3x01234567, vxk3x01234567));
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int8_t)));
const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
i4 += 8;
vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
w = (const void*) ((uintptr_t) w + 40 * sizeof(int8_t));
__m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
const __m128 vscale4567 = _mm_loadu_ps((const float*) w + 4);
w = (const void*) ((const float*) w + 8);
vscaled0123 = _mm_mul_ps(vscaled0123, vscale0123);
vscaled4567 = _mm_mul_ps(vscaled4567, vscale4567);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vscaled0123);
vacc4567 = _mm_cvtps_epi32(vscaled4567);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse4.output_min);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
}
if XNN_UNLIKELY(c != 0) {
{
__m128i vacc0123 = _mm_loadu_si128((const __m128i*) b);
__m128i vacc4567 = _mm_loadu_si128((const __m128i*) (b + 4));
b += 8;
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 0 * sizeof(int8_t)));
const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
__m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int8_t)));
const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi1x01234567, vxk1x01234567));
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int8_t)));
const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int8_t)));
const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi3x01234567, vxk3x01234567));
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int8_t)));
const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
__m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);
const __m128 vscale0123 = _mm_loadu_ps((const float*) ((uintptr_t) w + 40 * sizeof(int8_t)));
const __m128 vscale4567 = _mm_loadu_ps((const float*) ((uintptr_t) w + 40 * sizeof(int8_t) + 4 * sizeof(float)));
vscaled0123 = _mm_mul_ps(vscaled0123, vscale0123);
vscaled4567 = _mm_mul_ps(vscaled4567, vscale4567);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vscaled0123);
vacc4567 = _mm_cvtps_epi32(vscaled4567);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (c & 4) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (c & 2) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (c & 1) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
output += 1;
}
}
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 19,777
| 40.463312
| 131
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-5f5m5l8c8s8r-minmax-fp32-sse41-mul16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-sse-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_5f5m5l8c8s8r__sse41_mul16(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
do {
const void* w = weights;
// First pass to process 5 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
__m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(int8_t)));
const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
i0 += 8;
__m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(int8_t)));
const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
i1 += 8;
vprod01234567 = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(int8_t)));
const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
i2 += 8;
vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(int8_t)));
const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
i3 += 8;
vprod01234567 = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(int8_t)));
const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
i4 += 8;
vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(int8_t));
_mm_storeu_si128((__m128i*) b, vacc0123);
_mm_storeu_si128((__m128i*) (b + 4), vacc4567);
b += 8;
}
assert(c == 0);
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
__m128i vacc0123 = _mm_loadu_si128((const __m128i*) b);
__m128i vacc4567 = _mm_loadu_si128((const __m128i*) (b + 4));
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 0 * sizeof(int8_t)));
const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
i0 += 8;
__m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int8_t)));
const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
i1 += 8;
vprod01234567 = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int8_t)));
const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
i2 += 8;
vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int8_t)));
const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
i3 += 8;
vprod01234567 = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int8_t)));
const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
i4 += 8;
vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
w = (const void*) ((uintptr_t) w + 40 * sizeof(int8_t));
_mm_storeu_si128((__m128i*) (b), vacc0123);
_mm_storeu_si128((__m128i*) (b + 4), vacc4567);
b += 8;
}
assert(c == 0);
}
// Last pass to process up to 5 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
__m128i vacc0123 = _mm_loadu_si128((const __m128i*) b);
__m128i vacc4567 = _mm_loadu_si128((const __m128i*) (b + 4));
b += 8;
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 0 * sizeof(int8_t)));
const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
i0 += 8;
__m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int8_t)));
const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
i1 += 8;
vprod01234567 = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int8_t)));
const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
i2 += 8;
vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int8_t)));
const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
i3 += 8;
vprod01234567 = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int8_t)));
const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
i4 += 8;
vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
w = (const void*) ((uintptr_t) w + 40 * sizeof(int8_t));
__m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
const __m128 vscale4567 = _mm_loadu_ps((const float*) w + 4);
w = (const void*) ((const float*) w + 8);
vscaled0123 = _mm_mul_ps(vscaled0123, vscale0123);
vscaled4567 = _mm_mul_ps(vscaled4567, vscale4567);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vscaled0123);
vacc4567 = _mm_cvtps_epi32(vscaled4567);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse4.output_min);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
}
if XNN_UNLIKELY(c != 0) {
{
__m128i vacc0123 = _mm_loadu_si128((const __m128i*) b);
__m128i vacc4567 = _mm_loadu_si128((const __m128i*) (b + 4));
b += 8;
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 0 * sizeof(int8_t)));
const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
__m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int8_t)));
const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
vprod01234567 = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int8_t)));
const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int8_t)));
const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
vprod01234567 = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int8_t)));
const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
__m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);
const __m128 vscale0123 = _mm_loadu_ps((const float*) ((uintptr_t) w + 40 * sizeof(int8_t)));
const __m128 vscale4567 = _mm_loadu_ps((const float*) ((uintptr_t) w + 40 * sizeof(int8_t) + 4 * sizeof(float)));
vscaled0123 = _mm_mul_ps(vscaled0123, vscale0123);
vscaled4567 = _mm_mul_ps(vscaled4567, vscale4567);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vscaled0123);
vacc4567 = _mm_cvtps_epi32(vscaled4567);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (c & 4) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (c & 2) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (c & 1) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
output += 1;
}
}
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 21,083
| 41.766734
| 131
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-6f6m7l1c1s1r-minmax-fp32-scalar-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_6f6m7l1c1s1r__scalar_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 6);
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
do {
const void* w = weights;
// First pass to process 6 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = channels;
do {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[5];
vacc += vi5 * vk5;
w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 6 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
// Middle pass to process 6 inputs in each iteration.
for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = channels;
do {
int32_t vacc = *b;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = ((const int8_t*) w)[5];
vacc += vi5 * vk5;
w = (const void*) ((uintptr_t) w + 6 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
// Last pass to process up to 7 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
size_t c = channels;
do {
int32_t vacc = unaligned_load_s32(b++);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = ((const int8_t*) w)[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6++;
const int32_t vk6 = ((const int8_t*) w)[6];
vacc += vi6 * vk6;
w = (const void*) ((uintptr_t) w + 7 * sizeof(int8_t));
const float vscale = unaligned_load_f32(w);
w = (const void*) ((const float*) w + 1);
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 8,674
| 33.839357
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-6f6m7l1c1s1r-minmax-fp32-scalar-imagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_6f6m7l1c1s1r__scalar_imagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 6);
const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
do {
const void* w = weights;
// First pass to process 6 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = channels;
do {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[5];
vacc += vi5 * vk5;
w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 6 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
// Middle pass to process 6 inputs in each iteration.
for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = channels;
do {
int32_t vacc = *b;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = ((const int8_t*) w)[5];
vacc += vi5 * vk5;
w = (const void*) ((uintptr_t) w + 6 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
// Last pass to process up to 7 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
size_t c = channels;
do {
int32_t vacc = unaligned_load_s32(b++);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = ((const int8_t*) w)[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6++;
const int32_t vk6 = ((const int8_t*) w)[6];
vacc += vi6 * vk6;
w = (const void*) ((uintptr_t) w + 7 * sizeof(int8_t));
const float vscale = unaligned_load_f32(w);
w = (const void*) ((const float*) w + 1);
float vfpacc = (float) vacc * vscale;
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc);
vout = math_max_s32(vout, vmagic_min);
vout = math_min_s32(vout, vmagic_max);
vout -= vmagic_bias_less_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 8,562
| 33.252
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-6f6m7l1c1s1r-minmax-fp32-scalar-lrintf.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_6f6m7l1c1s1r__scalar_lrintf(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 6);
const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
do {
const void* w = weights;
// First pass to process 6 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = channels;
do {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[5];
vacc += vi5 * vk5;
w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 6 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
// Middle pass to process 6 inputs in each iteration.
for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = channels;
do {
int32_t vacc = *b;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = ((const int8_t*) w)[5];
vacc += vi5 * vk5;
w = (const void*) ((uintptr_t) w + 6 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
// Last pass to process up to 7 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
size_t c = channels;
do {
int32_t vacc = unaligned_load_s32(b++);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = ((const int8_t*) w)[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6++;
const int32_t vk6 = ((const int8_t*) w)[6];
vacc += vi6 * vk6;
w = (const void*) ((uintptr_t) w + 7 * sizeof(int8_t));
const float vscale = unaligned_load_f32(w);
w = (const void*) ((const float*) w + 1);
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
const int32_t vrndacc = (int32_t) lrintf(vfpacc);
int32_t vout = vrndacc + voutput_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 8,578
| 33.453815
| 98
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-6f6m7l1c1s1r-minmax-fp32-wasm-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_6f6m7l1c1s1r__wasm_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 6);
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
do {
const void* w = weights;
// First pass to process 6 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = channels;
do {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[5];
vacc += vi5 * vk5;
w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 6 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
// Middle pass to process 6 inputs in each iteration.
for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = channels;
do {
int32_t vacc = *b;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = ((const int8_t*) w)[5];
vacc += vi5 * vk5;
w = (const void*) ((uintptr_t) w + 6 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
// Last pass to process up to 7 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
size_t c = channels;
do {
int32_t vacc = unaligned_load_s32(b++);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = ((const int8_t*) w)[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6++;
const int32_t vk6 = ((const int8_t*) w)[6];
vacc += vi6 * vk6;
w = (const void*) ((uintptr_t) w + 7 * sizeof(int8_t));
const float vscale = unaligned_load_f32(w);
w = (const void*) ((const float*) w + 1);
float vfpacc = (float) vacc * vscale;
vfpacc = __builtin_wasm_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = __builtin_wasm_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 8,692
| 33.911647
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-6f6m7l2c1s1r-minmax-fp32-scalar-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_6f6m7l2c1s1r__scalar_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 6);
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
do {
const void* w = weights;
// First pass to process 6 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10];
const int32_t vk5x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[11];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 12 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5;
const int32_t vk5 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[5];
vacc += vi5 * vk5;
w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 6 * sizeof(int8_t));
*b++ = vacc;
}
}
// Middle pass to process 6 inputs in each iteration.
for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) ((const int8_t*) w)[10];
const int32_t vk5x1 = (int32_t) ((const int8_t*) w)[11];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
w = (const void*) ((uintptr_t) w + 12 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = b[0];
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5;
const int32_t vk5 = (int32_t) ((const int8_t*) w)[5];
vacc += vi5 * vk5;
w = (const void*) ((uintptr_t) w + 6 * sizeof(int8_t));
*b++ = vacc;
}
}
// Last pass to process up to 7 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
b += 2;
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) ((const int8_t*) w)[10];
const int32_t vk5x1 = (int32_t) ((const int8_t*) w)[11];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
const int32_t vi6x0 = (int32_t) i6[0];
const int32_t vi6x1 = (int32_t) i6[1];
i6 += 2;
const int32_t vk6x0 = (int32_t) ((const int8_t*) w)[12];
const int32_t vk6x1 = (int32_t) ((const int8_t*) w)[13];
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
w = (const void*) ((uintptr_t) w + 14 * sizeof(int8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
const float vscale0 = unaligned_indexed_load_f32(w, 0);
const float vscale1 = unaligned_indexed_load_f32(w, 1);
w = (const void*) ((const float*) w + 2);
vfpacc0 *= vscale0;
vfpacc1 *= vscale1;
vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = b[0];
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5;
const int32_t vk5 = (int32_t) ((const int8_t*) w)[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6;
const int32_t vk6 = (int32_t) ((const int8_t*) w)[6];
vacc += vi6 * vk6;
const float vscale = unaligned_load_f32((const void*) ((uintptr_t) w + 7 * sizeof(int8_t)));
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 16,696
| 33.145194
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-6f6m7l2c1s1r-minmax-fp32-scalar-imagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_6f6m7l2c1s1r__scalar_imagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 6);
const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
do {
const void* w = weights;
// First pass to process 6 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10];
const int32_t vk5x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[11];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 12 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5;
const int32_t vk5 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[5];
vacc += vi5 * vk5;
w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 6 * sizeof(int8_t));
*b++ = vacc;
}
}
// Middle pass to process 6 inputs in each iteration.
for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) ((const int8_t*) w)[10];
const int32_t vk5x1 = (int32_t) ((const int8_t*) w)[11];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
w = (const void*) ((uintptr_t) w + 12 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = b[0];
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5;
const int32_t vk5 = (int32_t) ((const int8_t*) w)[5];
vacc += vi5 * vk5;
w = (const void*) ((uintptr_t) w + 6 * sizeof(int8_t));
*b++ = vacc;
}
}
// Last pass to process up to 7 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
b += 2;
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) ((const int8_t*) w)[10];
const int32_t vk5x1 = (int32_t) ((const int8_t*) w)[11];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
const int32_t vi6x0 = (int32_t) i6[0];
const int32_t vi6x1 = (int32_t) i6[1];
i6 += 2;
const int32_t vk6x0 = (int32_t) ((const int8_t*) w)[12];
const int32_t vk6x1 = (int32_t) ((const int8_t*) w)[13];
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
w = (const void*) ((uintptr_t) w + 14 * sizeof(int8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
const float vscale0 = unaligned_indexed_load_f32(w, 0);
const float vscale1 = unaligned_indexed_load_f32(w, 1);
w = (const void*) ((const float*) w + 2);
vfpacc0 *= vscale0;
vfpacc1 *= vscale1;
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0);
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1);
vout0 = math_max_s32(vout0, vmagic_min);
vout1 = math_max_s32(vout1, vmagic_min);
vout0 = math_min_s32(vout0, vmagic_max);
vout1 = math_min_s32(vout1, vmagic_max);
vout0 -= vmagic_bias_less_zero_point;
vout1 -= vmagic_bias_less_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = b[0];
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5;
const int32_t vk5 = (int32_t) ((const int8_t*) w)[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6;
const int32_t vk6 = (int32_t) ((const int8_t*) w)[6];
vacc += vi6 * vk6;
const float vscale = unaligned_load_f32((const void*) ((uintptr_t) w + 7 * sizeof(int8_t)));
float vfpacc = (float) vacc * vscale;
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc);
vout = math_max_s32(vout, vmagic_min);
vout = math_min_s32(vout, vmagic_max);
vout -= vmagic_bias_less_zero_point;
*output++ = (int8_t) vout;
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 16,519
| 32.509128
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-6f6m7l2c1s1r-minmax-fp32-scalar-lrintf.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_6f6m7l2c1s1r__scalar_lrintf(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 6);
const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
do {
const void* w = weights;
// First pass to process 6 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10];
const int32_t vk5x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[11];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 12 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5;
const int32_t vk5 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[5];
vacc += vi5 * vk5;
w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 6 * sizeof(int8_t));
*b++ = vacc;
}
}
// Middle pass to process 6 inputs in each iteration.
for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) ((const int8_t*) w)[10];
const int32_t vk5x1 = (int32_t) ((const int8_t*) w)[11];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
w = (const void*) ((uintptr_t) w + 12 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = b[0];
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5;
const int32_t vk5 = (int32_t) ((const int8_t*) w)[5];
vacc += vi5 * vk5;
w = (const void*) ((uintptr_t) w + 6 * sizeof(int8_t));
*b++ = vacc;
}
}
// Last pass to process up to 7 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
b += 2;
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) ((const int8_t*) w)[10];
const int32_t vk5x1 = (int32_t) ((const int8_t*) w)[11];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
const int32_t vi6x0 = (int32_t) i6[0];
const int32_t vi6x1 = (int32_t) i6[1];
i6 += 2;
const int32_t vk6x0 = (int32_t) ((const int8_t*) w)[12];
const int32_t vk6x1 = (int32_t) ((const int8_t*) w)[13];
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
w = (const void*) ((uintptr_t) w + 14 * sizeof(int8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
const float vscale0 = unaligned_indexed_load_f32(w, 0);
const float vscale1 = unaligned_indexed_load_f32(w, 1);
w = (const void*) ((const float*) w + 2);
vfpacc0 *= vscale0;
vfpacc1 *= vscale1;
vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
const int32_t vrndacc0 = (int32_t) lrintf(vfpacc0);
const int32_t vrndacc1 = (int32_t) lrintf(vfpacc1);
int32_t vout0 = (int32_t) vrndacc0 + voutput_zero_point;
int32_t vout1 = (int32_t) vrndacc1 + voutput_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = b[0];
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5;
const int32_t vk5 = (int32_t) ((const int8_t*) w)[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6;
const int32_t vk6 = (int32_t) ((const int8_t*) w)[6];
vacc += vi6 * vk6;
const float vscale = unaligned_load_f32((const void*) ((uintptr_t) w + 7 * sizeof(int8_t)));
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
const int32_t vrndacc = (int32_t) lrintf(vfpacc);
int32_t vout = vrndacc + voutput_zero_point;
*output++ = (int8_t) vout;
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 16,592
| 32.932515
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-6f6m7l2c1s1r-minmax-fp32-wasm-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_6f6m7l2c1s1r__wasm_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 6);
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
do {
const void* w = weights;
// First pass to process 6 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10];
const int32_t vk5x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[11];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 12 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5;
const int32_t vk5 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[5];
vacc += vi5 * vk5;
w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 6 * sizeof(int8_t));
*b++ = vacc;
}
}
// Middle pass to process 6 inputs in each iteration.
for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) ((const int8_t*) w)[10];
const int32_t vk5x1 = (int32_t) ((const int8_t*) w)[11];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
w = (const void*) ((uintptr_t) w + 12 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = b[0];
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5;
const int32_t vk5 = (int32_t) ((const int8_t*) w)[5];
vacc += vi5 * vk5;
w = (const void*) ((uintptr_t) w + 6 * sizeof(int8_t));
*b++ = vacc;
}
}
// Last pass to process up to 7 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
b += 2;
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) ((const int8_t*) w)[10];
const int32_t vk5x1 = (int32_t) ((const int8_t*) w)[11];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
const int32_t vi6x0 = (int32_t) i6[0];
const int32_t vi6x1 = (int32_t) i6[1];
i6 += 2;
const int32_t vk6x0 = (int32_t) ((const int8_t*) w)[12];
const int32_t vk6x1 = (int32_t) ((const int8_t*) w)[13];
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
w = (const void*) ((uintptr_t) w + 14 * sizeof(int8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
const float vscale0 = unaligned_indexed_load_f32(w, 0);
const float vscale1 = unaligned_indexed_load_f32(w, 1);
w = (const void*) ((const float*) w + 2);
vfpacc0 *= vscale0;
vfpacc1 *= vscale1;
vfpacc0 = __builtin_wasm_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = __builtin_wasm_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc0 = __builtin_wasm_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = __builtin_wasm_min_f32(vfpacc1, voutput_max_less_zero_point);
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = b[0];
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5;
const int32_t vk5 = (int32_t) ((const int8_t*) w)[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6;
const int32_t vk6 = (int32_t) ((const int8_t*) w)[6];
vacc += vi6 * vk6;
const float vscale = unaligned_load_f32((const void*) ((uintptr_t) w + 7 * sizeof(int8_t)));
float vfpacc = (float) vacc * vscale;
vfpacc = __builtin_wasm_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = __builtin_wasm_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 16,754
| 33.263804
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-6f6m7l4c1s1r-minmax-fp32-scalar-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_6f6m7l4c1s1r__scalar_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 6);
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
do {
const void* w = weights;
// First pass to process 6 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = channels;
for (; c >= 4; c -= 4) {
int32_t vacc0 = ((const int32_t*) w)[0];
int32_t vacc1 = ((const int32_t*) w)[1];
int32_t vacc2 = ((const int32_t*) w)[2];
int32_t vacc3 = ((const int32_t*) w)[3];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[1];
const int32_t vk0x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[2];
const int32_t vk0x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[3];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
const int32_t vi1x2 = (int32_t) i1[2];
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[4];
const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[5];
const int32_t vk1x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[6];
const int32_t vk1x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[7];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
const int32_t vi2x2 = (int32_t) i2[2];
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[8];
const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[9];
const int32_t vk2x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[10];
const int32_t vk2x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[11];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
const int32_t vi3x2 = (int32_t) i3[2];
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[12];
const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[13];
const int32_t vk3x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[14];
const int32_t vk3x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[15];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
const int32_t vi4x2 = (int32_t) i4[2];
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[16];
const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[17];
const int32_t vk4x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[18];
const int32_t vk4x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[19];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
const int32_t vi5x2 = (int32_t) i5[2];
const int32_t vi5x3 = (int32_t) i5[3];
i5 += 4;
const int32_t vk5x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[20];
const int32_t vk5x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[21];
const int32_t vk5x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[22];
const int32_t vk5x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[23];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
vacc2 += vi5x2 * vk5x2;
vacc3 += vi5x3 * vk5x3;
w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t) + 24 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b[2] = vacc2;
b[3] = vacc3;
b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32_t vacc = *((const int32_t*) w);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[5];
vacc += vi5 * vk5;
w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 6 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
}
// Middle pass to process 6 inputs in each iteration.
for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = channels;
for (; c >= 4; c -= 4) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
int32_t vacc2 = b[2];
int32_t vacc3 = b[3];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
const int32_t vk0x2 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk0x3 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
const int32_t vi1x2 = (int32_t) i1[2];
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[5];
const int32_t vk1x2 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk1x3 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
const int32_t vi2x2 = (int32_t) i2[2];
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[9];
const int32_t vk2x2 = (int32_t) ((const int8_t*) w)[10];
const int32_t vk2x3 = (int32_t) ((const int8_t*) w)[11];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
const int32_t vi3x2 = (int32_t) i3[2];
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[12];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[13];
const int32_t vk3x2 = (int32_t) ((const int8_t*) w)[14];
const int32_t vk3x3 = (int32_t) ((const int8_t*) w)[15];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
const int32_t vi4x2 = (int32_t) i4[2];
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[16];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[17];
const int32_t vk4x2 = (int32_t) ((const int8_t*) w)[18];
const int32_t vk4x3 = (int32_t) ((const int8_t*) w)[19];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
const int32_t vi5x2 = (int32_t) i5[2];
const int32_t vi5x3 = (int32_t) i5[3];
i5 += 4;
const int32_t vk5x0 = (int32_t) ((const int8_t*) w)[20];
const int32_t vk5x1 = (int32_t) ((const int8_t*) w)[21];
const int32_t vk5x2 = (int32_t) ((const int8_t*) w)[22];
const int32_t vk5x3 = (int32_t) ((const int8_t*) w)[23];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
vacc2 += vi5x2 * vk5x2;
vacc3 += vi5x3 * vk5x3;
w = (const void*) ((uintptr_t) w + 24 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b[2] = vacc2;
b[3] = vacc3;
b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32_t vacc = *b;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = (int32_t) ((const int8_t*) w)[5];
vacc += vi5 * vk5;
w = (const void*) ((uintptr_t) w + 6 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
}
// Last pass to process up to 7 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
int32_t vacc2 = b[2];
int32_t vacc3 = b[3];
b += 4;
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
const int32_t vk0x2 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk0x3 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
const int32_t vi1x2 = (int32_t) i1[2];
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[5];
const int32_t vk1x2 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk1x3 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
const int32_t vi2x2 = (int32_t) i2[2];
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[9];
const int32_t vk2x2 = (int32_t) ((const int8_t*) w)[10];
const int32_t vk2x3 = (int32_t) ((const int8_t*) w)[11];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
const int32_t vi3x2 = (int32_t) i3[2];
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[12];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[13];
const int32_t vk3x2 = (int32_t) ((const int8_t*) w)[14];
const int32_t vk3x3 = (int32_t) ((const int8_t*) w)[15];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
const int32_t vi4x2 = (int32_t) i4[2];
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[16];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[17];
const int32_t vk4x2 = (int32_t) ((const int8_t*) w)[18];
const int32_t vk4x3 = (int32_t) ((const int8_t*) w)[19];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
const int32_t vi5x2 = (int32_t) i5[2];
const int32_t vi5x3 = (int32_t) i5[3];
i5 += 4;
const int32_t vk5x0 = (int32_t) ((const int8_t*) w)[20];
const int32_t vk5x1 = (int32_t) ((const int8_t*) w)[21];
const int32_t vk5x2 = (int32_t) ((const int8_t*) w)[22];
const int32_t vk5x3 = (int32_t) ((const int8_t*) w)[23];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
vacc2 += vi5x2 * vk5x2;
vacc3 += vi5x3 * vk5x3;
const int32_t vi6x0 = (int32_t) i6[0];
const int32_t vi6x1 = (int32_t) i6[1];
const int32_t vi6x2 = (int32_t) i6[2];
const int32_t vi6x3 = (int32_t) i6[3];
i6 += 4;
const int32_t vk6x0 = (int32_t) ((const int8_t*) w)[24];
const int32_t vk6x1 = (int32_t) ((const int8_t*) w)[25];
const int32_t vk6x2 = (int32_t) ((const int8_t*) w)[26];
const int32_t vk6x3 = (int32_t) ((const int8_t*) w)[27];
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
vacc2 += vi6x2 * vk6x2;
vacc3 += vi6x3 * vk6x3;
w = (const void*) ((uintptr_t) w + 28 * sizeof(int8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
float vfpacc2 = (float) vacc2;
float vfpacc3 = (float) vacc3;
const float vscale0 = unaligned_indexed_load_f32(w, 0);
const float vscale1 = unaligned_indexed_load_f32(w, 1);
const float vscale2 = unaligned_indexed_load_f32(w, 2);
const float vscale3 = unaligned_indexed_load_f32(w, 3);
w = (const void*) ((const float*) w + 4);
vfpacc0 *= vscale0;
vfpacc1 *= vscale1;
vfpacc2 *= vscale2;
vfpacc3 *= vscale3;
vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc2 = math_max_f32(vfpacc2, voutput_min_less_zero_point);
vfpacc3 = math_max_f32(vfpacc3, voutput_min_less_zero_point);
vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
vfpacc2 = math_min_f32(vfpacc2, voutput_max_less_zero_point);
vfpacc3 = math_min_f32(vfpacc3, voutput_max_less_zero_point);
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
vfpacc2 += vmagic_bias;
vfpacc3 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
int32_t vout2 = (int32_t) float_as_uint32(vfpacc2) - vmagic_bias_less_output_zero_point;
int32_t vout3 = (int32_t) float_as_uint32(vfpacc3) - vmagic_bias_less_output_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output[2] = (int8_t) vout2;
output[3] = (int8_t) vout3;
output += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32_t vacc = *b++;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = (int32_t) ((const int8_t*) w)[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6++;
const int32_t vk6 = (int32_t) ((const int8_t*) w)[6];
vacc += vi6 * vk6;
w = (const void*) ((uintptr_t) w + 7 * sizeof(int8_t));
const float vscale = unaligned_load_f32(w);
w = (const void*) ((const float*) w + 1);
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 24,129
| 36.940252
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-6f6m7l4c1s1r-minmax-fp32-scalar-imagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_6f6m7l4c1s1r__scalar_imagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 6);
const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
do {
const void* w = weights;
// First pass to process 6 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = channels;
for (; c >= 4; c -= 4) {
int32_t vacc0 = ((const int32_t*) w)[0];
int32_t vacc1 = ((const int32_t*) w)[1];
int32_t vacc2 = ((const int32_t*) w)[2];
int32_t vacc3 = ((const int32_t*) w)[3];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[1];
const int32_t vk0x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[2];
const int32_t vk0x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[3];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
const int32_t vi1x2 = (int32_t) i1[2];
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[4];
const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[5];
const int32_t vk1x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[6];
const int32_t vk1x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[7];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
const int32_t vi2x2 = (int32_t) i2[2];
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[8];
const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[9];
const int32_t vk2x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[10];
const int32_t vk2x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[11];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
const int32_t vi3x2 = (int32_t) i3[2];
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[12];
const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[13];
const int32_t vk3x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[14];
const int32_t vk3x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[15];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
const int32_t vi4x2 = (int32_t) i4[2];
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[16];
const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[17];
const int32_t vk4x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[18];
const int32_t vk4x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[19];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
const int32_t vi5x2 = (int32_t) i5[2];
const int32_t vi5x3 = (int32_t) i5[3];
i5 += 4;
const int32_t vk5x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[20];
const int32_t vk5x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[21];
const int32_t vk5x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[22];
const int32_t vk5x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[23];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
vacc2 += vi5x2 * vk5x2;
vacc3 += vi5x3 * vk5x3;
w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t) + 24 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b[2] = vacc2;
b[3] = vacc3;
b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32_t vacc = *((const int32_t*) w);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[5];
vacc += vi5 * vk5;
w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 6 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
}
// Middle pass to process 6 inputs in each iteration.
for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = channels;
for (; c >= 4; c -= 4) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
int32_t vacc2 = b[2];
int32_t vacc3 = b[3];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
const int32_t vk0x2 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk0x3 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
const int32_t vi1x2 = (int32_t) i1[2];
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[5];
const int32_t vk1x2 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk1x3 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
const int32_t vi2x2 = (int32_t) i2[2];
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[9];
const int32_t vk2x2 = (int32_t) ((const int8_t*) w)[10];
const int32_t vk2x3 = (int32_t) ((const int8_t*) w)[11];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
const int32_t vi3x2 = (int32_t) i3[2];
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[12];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[13];
const int32_t vk3x2 = (int32_t) ((const int8_t*) w)[14];
const int32_t vk3x3 = (int32_t) ((const int8_t*) w)[15];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
const int32_t vi4x2 = (int32_t) i4[2];
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[16];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[17];
const int32_t vk4x2 = (int32_t) ((const int8_t*) w)[18];
const int32_t vk4x3 = (int32_t) ((const int8_t*) w)[19];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
const int32_t vi5x2 = (int32_t) i5[2];
const int32_t vi5x3 = (int32_t) i5[3];
i5 += 4;
const int32_t vk5x0 = (int32_t) ((const int8_t*) w)[20];
const int32_t vk5x1 = (int32_t) ((const int8_t*) w)[21];
const int32_t vk5x2 = (int32_t) ((const int8_t*) w)[22];
const int32_t vk5x3 = (int32_t) ((const int8_t*) w)[23];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
vacc2 += vi5x2 * vk5x2;
vacc3 += vi5x3 * vk5x3;
w = (const void*) ((uintptr_t) w + 24 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b[2] = vacc2;
b[3] = vacc3;
b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32_t vacc = *b;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = (int32_t) ((const int8_t*) w)[5];
vacc += vi5 * vk5;
w = (const void*) ((uintptr_t) w + 6 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
}
// Last pass to process up to 7 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
int32_t vacc2 = b[2];
int32_t vacc3 = b[3];
b += 4;
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
const int32_t vk0x2 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk0x3 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
const int32_t vi1x2 = (int32_t) i1[2];
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[5];
const int32_t vk1x2 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk1x3 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
const int32_t vi2x2 = (int32_t) i2[2];
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[9];
const int32_t vk2x2 = (int32_t) ((const int8_t*) w)[10];
const int32_t vk2x3 = (int32_t) ((const int8_t*) w)[11];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
const int32_t vi3x2 = (int32_t) i3[2];
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[12];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[13];
const int32_t vk3x2 = (int32_t) ((const int8_t*) w)[14];
const int32_t vk3x3 = (int32_t) ((const int8_t*) w)[15];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
const int32_t vi4x2 = (int32_t) i4[2];
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[16];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[17];
const int32_t vk4x2 = (int32_t) ((const int8_t*) w)[18];
const int32_t vk4x3 = (int32_t) ((const int8_t*) w)[19];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
const int32_t vi5x2 = (int32_t) i5[2];
const int32_t vi5x3 = (int32_t) i5[3];
i5 += 4;
const int32_t vk5x0 = (int32_t) ((const int8_t*) w)[20];
const int32_t vk5x1 = (int32_t) ((const int8_t*) w)[21];
const int32_t vk5x2 = (int32_t) ((const int8_t*) w)[22];
const int32_t vk5x3 = (int32_t) ((const int8_t*) w)[23];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
vacc2 += vi5x2 * vk5x2;
vacc3 += vi5x3 * vk5x3;
const int32_t vi6x0 = (int32_t) i6[0];
const int32_t vi6x1 = (int32_t) i6[1];
const int32_t vi6x2 = (int32_t) i6[2];
const int32_t vi6x3 = (int32_t) i6[3];
i6 += 4;
const int32_t vk6x0 = (int32_t) ((const int8_t*) w)[24];
const int32_t vk6x1 = (int32_t) ((const int8_t*) w)[25];
const int32_t vk6x2 = (int32_t) ((const int8_t*) w)[26];
const int32_t vk6x3 = (int32_t) ((const int8_t*) w)[27];
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
vacc2 += vi6x2 * vk6x2;
vacc3 += vi6x3 * vk6x3;
w = (const void*) ((uintptr_t) w + 28 * sizeof(int8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
float vfpacc2 = (float) vacc2;
float vfpacc3 = (float) vacc3;
const float vscale0 = unaligned_indexed_load_f32(w, 0);
const float vscale1 = unaligned_indexed_load_f32(w, 1);
const float vscale2 = unaligned_indexed_load_f32(w, 2);
const float vscale3 = unaligned_indexed_load_f32(w, 3);
w = (const void*) ((const float*) w + 4);
vfpacc0 *= vscale0;
vfpacc1 *= vscale1;
vfpacc2 *= vscale2;
vfpacc3 *= vscale3;
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
vfpacc2 += vmagic_bias;
vfpacc3 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0);
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1);
int32_t vout2 = (int32_t) float_as_uint32(vfpacc2);
int32_t vout3 = (int32_t) float_as_uint32(vfpacc3);
vout0 = math_max_s32(vout0, vmagic_min);
vout1 = math_max_s32(vout1, vmagic_min);
vout2 = math_max_s32(vout2, vmagic_min);
vout3 = math_max_s32(vout3, vmagic_min);
vout0 = math_min_s32(vout0, vmagic_max);
vout1 = math_min_s32(vout1, vmagic_max);
vout2 = math_min_s32(vout2, vmagic_max);
vout3 = math_min_s32(vout3, vmagic_max);
vout0 -= vmagic_bias_less_zero_point;
vout1 -= vmagic_bias_less_zero_point;
vout2 -= vmagic_bias_less_zero_point;
vout3 -= vmagic_bias_less_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output[2] = (int8_t) vout2;
output[3] = (int8_t) vout3;
output += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32_t vacc = *b++;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = (int32_t) ((const int8_t*) w)[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6++;
const int32_t vk6 = (int32_t) ((const int8_t*) w)[6];
vacc += vi6 * vk6;
w = (const void*) ((uintptr_t) w + 7 * sizeof(int8_t));
const float vscale = unaligned_load_f32(w);
w = (const void*) ((const float*) w + 1);
float vfpacc = (float) vacc * vscale;
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc);
vout = math_max_s32(vout, vmagic_min);
vout = math_min_s32(vout, vmagic_max);
vout -= vmagic_bias_less_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 23,888
| 36.21028
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-6f6m7l4c1s1r-minmax-fp32-scalar-lrintf.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_6f6m7l4c1s1r__scalar_lrintf(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 6);
const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
do {
const void* w = weights;
// First pass to process 6 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = channels;
for (; c >= 4; c -= 4) {
int32_t vacc0 = ((const int32_t*) w)[0];
int32_t vacc1 = ((const int32_t*) w)[1];
int32_t vacc2 = ((const int32_t*) w)[2];
int32_t vacc3 = ((const int32_t*) w)[3];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[1];
const int32_t vk0x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[2];
const int32_t vk0x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[3];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
const int32_t vi1x2 = (int32_t) i1[2];
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[4];
const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[5];
const int32_t vk1x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[6];
const int32_t vk1x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[7];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
const int32_t vi2x2 = (int32_t) i2[2];
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[8];
const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[9];
const int32_t vk2x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[10];
const int32_t vk2x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[11];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
const int32_t vi3x2 = (int32_t) i3[2];
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[12];
const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[13];
const int32_t vk3x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[14];
const int32_t vk3x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[15];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
const int32_t vi4x2 = (int32_t) i4[2];
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[16];
const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[17];
const int32_t vk4x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[18];
const int32_t vk4x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[19];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
const int32_t vi5x2 = (int32_t) i5[2];
const int32_t vi5x3 = (int32_t) i5[3];
i5 += 4;
const int32_t vk5x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[20];
const int32_t vk5x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[21];
const int32_t vk5x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[22];
const int32_t vk5x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[23];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
vacc2 += vi5x2 * vk5x2;
vacc3 += vi5x3 * vk5x3;
w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t) + 24 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b[2] = vacc2;
b[3] = vacc3;
b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32_t vacc = *((const int32_t*) w);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[5];
vacc += vi5 * vk5;
w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 6 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
}
// Middle pass to process 6 inputs in each iteration.
for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = channels;
for (; c >= 4; c -= 4) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
int32_t vacc2 = b[2];
int32_t vacc3 = b[3];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
const int32_t vk0x2 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk0x3 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
const int32_t vi1x2 = (int32_t) i1[2];
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[5];
const int32_t vk1x2 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk1x3 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
const int32_t vi2x2 = (int32_t) i2[2];
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[9];
const int32_t vk2x2 = (int32_t) ((const int8_t*) w)[10];
const int32_t vk2x3 = (int32_t) ((const int8_t*) w)[11];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
const int32_t vi3x2 = (int32_t) i3[2];
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[12];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[13];
const int32_t vk3x2 = (int32_t) ((const int8_t*) w)[14];
const int32_t vk3x3 = (int32_t) ((const int8_t*) w)[15];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
const int32_t vi4x2 = (int32_t) i4[2];
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[16];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[17];
const int32_t vk4x2 = (int32_t) ((const int8_t*) w)[18];
const int32_t vk4x3 = (int32_t) ((const int8_t*) w)[19];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
const int32_t vi5x2 = (int32_t) i5[2];
const int32_t vi5x3 = (int32_t) i5[3];
i5 += 4;
const int32_t vk5x0 = (int32_t) ((const int8_t*) w)[20];
const int32_t vk5x1 = (int32_t) ((const int8_t*) w)[21];
const int32_t vk5x2 = (int32_t) ((const int8_t*) w)[22];
const int32_t vk5x3 = (int32_t) ((const int8_t*) w)[23];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
vacc2 += vi5x2 * vk5x2;
vacc3 += vi5x3 * vk5x3;
w = (const void*) ((uintptr_t) w + 24 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b[2] = vacc2;
b[3] = vacc3;
b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32_t vacc = *b;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = (int32_t) ((const int8_t*) w)[5];
vacc += vi5 * vk5;
w = (const void*) ((uintptr_t) w + 6 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
}
// Last pass to process up to 7 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
int32_t vacc2 = b[2];
int32_t vacc3 = b[3];
b += 4;
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
const int32_t vk0x2 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk0x3 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
const int32_t vi1x2 = (int32_t) i1[2];
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[5];
const int32_t vk1x2 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk1x3 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
const int32_t vi2x2 = (int32_t) i2[2];
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[9];
const int32_t vk2x2 = (int32_t) ((const int8_t*) w)[10];
const int32_t vk2x3 = (int32_t) ((const int8_t*) w)[11];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
const int32_t vi3x2 = (int32_t) i3[2];
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[12];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[13];
const int32_t vk3x2 = (int32_t) ((const int8_t*) w)[14];
const int32_t vk3x3 = (int32_t) ((const int8_t*) w)[15];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
const int32_t vi4x2 = (int32_t) i4[2];
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[16];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[17];
const int32_t vk4x2 = (int32_t) ((const int8_t*) w)[18];
const int32_t vk4x3 = (int32_t) ((const int8_t*) w)[19];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
const int32_t vi5x2 = (int32_t) i5[2];
const int32_t vi5x3 = (int32_t) i5[3];
i5 += 4;
const int32_t vk5x0 = (int32_t) ((const int8_t*) w)[20];
const int32_t vk5x1 = (int32_t) ((const int8_t*) w)[21];
const int32_t vk5x2 = (int32_t) ((const int8_t*) w)[22];
const int32_t vk5x3 = (int32_t) ((const int8_t*) w)[23];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
vacc2 += vi5x2 * vk5x2;
vacc3 += vi5x3 * vk5x3;
const int32_t vi6x0 = (int32_t) i6[0];
const int32_t vi6x1 = (int32_t) i6[1];
const int32_t vi6x2 = (int32_t) i6[2];
const int32_t vi6x3 = (int32_t) i6[3];
i6 += 4;
const int32_t vk6x0 = (int32_t) ((const int8_t*) w)[24];
const int32_t vk6x1 = (int32_t) ((const int8_t*) w)[25];
const int32_t vk6x2 = (int32_t) ((const int8_t*) w)[26];
const int32_t vk6x3 = (int32_t) ((const int8_t*) w)[27];
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
vacc2 += vi6x2 * vk6x2;
vacc3 += vi6x3 * vk6x3;
w = (const void*) ((uintptr_t) w + 28 * sizeof(int8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
float vfpacc2 = (float) vacc2;
float vfpacc3 = (float) vacc3;
const float vscale0 = unaligned_indexed_load_f32(w, 0);
const float vscale1 = unaligned_indexed_load_f32(w, 1);
const float vscale2 = unaligned_indexed_load_f32(w, 2);
const float vscale3 = unaligned_indexed_load_f32(w, 3);
w = (const void*) ((const float*) w + 4);
vfpacc0 *= vscale0;
vfpacc1 *= vscale1;
vfpacc2 *= vscale2;
vfpacc3 *= vscale3;
vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc2 = math_max_f32(vfpacc2, voutput_min_less_zero_point);
vfpacc3 = math_max_f32(vfpacc3, voutput_min_less_zero_point);
vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
vfpacc2 = math_min_f32(vfpacc2, voutput_max_less_zero_point);
vfpacc3 = math_min_f32(vfpacc3, voutput_max_less_zero_point);
const int32_t vrndacc0 = (int32_t) lrintf(vfpacc0);
const int32_t vrndacc1 = (int32_t) lrintf(vfpacc1);
const int32_t vrndacc2 = (int32_t) lrintf(vfpacc2);
const int32_t vrndacc3 = (int32_t) lrintf(vfpacc3);
int32_t vout0 = (int32_t) vrndacc0 + voutput_zero_point;
int32_t vout1 = (int32_t) vrndacc1 + voutput_zero_point;
int32_t vout2 = (int32_t) vrndacc2 + voutput_zero_point;
int32_t vout3 = (int32_t) vrndacc3 + voutput_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output[2] = (int8_t) vout2;
output[3] = (int8_t) vout3;
output += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32_t vacc = *b++;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = (int32_t) ((const int8_t*) w)[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6++;
const int32_t vk6 = (int32_t) ((const int8_t*) w)[6];
vacc += vi6 * vk6;
w = (const void*) ((uintptr_t) w + 7 * sizeof(int8_t));
const float vscale = unaligned_load_f32(w);
w = (const void*) ((const float*) w + 1);
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
const int32_t vrndacc = (int32_t) lrintf(vfpacc);
int32_t vout = vrndacc + voutput_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 24,017
| 36.764151
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-6f6m7l4c1s1r-minmax-fp32-wasm-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_6f6m7l4c1s1r__wasm_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 6);
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
do {
const void* w = weights;
// First pass to process 6 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = channels;
for (; c >= 4; c -= 4) {
int32_t vacc0 = ((const int32_t*) w)[0];
int32_t vacc1 = ((const int32_t*) w)[1];
int32_t vacc2 = ((const int32_t*) w)[2];
int32_t vacc3 = ((const int32_t*) w)[3];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[1];
const int32_t vk0x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[2];
const int32_t vk0x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[3];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
const int32_t vi1x2 = (int32_t) i1[2];
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[4];
const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[5];
const int32_t vk1x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[6];
const int32_t vk1x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[7];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
const int32_t vi2x2 = (int32_t) i2[2];
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[8];
const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[9];
const int32_t vk2x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[10];
const int32_t vk2x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[11];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
const int32_t vi3x2 = (int32_t) i3[2];
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[12];
const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[13];
const int32_t vk3x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[14];
const int32_t vk3x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[15];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
const int32_t vi4x2 = (int32_t) i4[2];
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[16];
const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[17];
const int32_t vk4x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[18];
const int32_t vk4x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[19];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
const int32_t vi5x2 = (int32_t) i5[2];
const int32_t vi5x3 = (int32_t) i5[3];
i5 += 4;
const int32_t vk5x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[20];
const int32_t vk5x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[21];
const int32_t vk5x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[22];
const int32_t vk5x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[23];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
vacc2 += vi5x2 * vk5x2;
vacc3 += vi5x3 * vk5x3;
w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t) + 24 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b[2] = vacc2;
b[3] = vacc3;
b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32_t vacc = *((const int32_t*) w);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[5];
vacc += vi5 * vk5;
w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 6 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
}
// Middle pass to process 6 inputs in each iteration.
for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = channels;
for (; c >= 4; c -= 4) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
int32_t vacc2 = b[2];
int32_t vacc3 = b[3];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
const int32_t vk0x2 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk0x3 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
const int32_t vi1x2 = (int32_t) i1[2];
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[5];
const int32_t vk1x2 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk1x3 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
const int32_t vi2x2 = (int32_t) i2[2];
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[9];
const int32_t vk2x2 = (int32_t) ((const int8_t*) w)[10];
const int32_t vk2x3 = (int32_t) ((const int8_t*) w)[11];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
const int32_t vi3x2 = (int32_t) i3[2];
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[12];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[13];
const int32_t vk3x2 = (int32_t) ((const int8_t*) w)[14];
const int32_t vk3x3 = (int32_t) ((const int8_t*) w)[15];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
const int32_t vi4x2 = (int32_t) i4[2];
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[16];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[17];
const int32_t vk4x2 = (int32_t) ((const int8_t*) w)[18];
const int32_t vk4x3 = (int32_t) ((const int8_t*) w)[19];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
const int32_t vi5x2 = (int32_t) i5[2];
const int32_t vi5x3 = (int32_t) i5[3];
i5 += 4;
const int32_t vk5x0 = (int32_t) ((const int8_t*) w)[20];
const int32_t vk5x1 = (int32_t) ((const int8_t*) w)[21];
const int32_t vk5x2 = (int32_t) ((const int8_t*) w)[22];
const int32_t vk5x3 = (int32_t) ((const int8_t*) w)[23];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
vacc2 += vi5x2 * vk5x2;
vacc3 += vi5x3 * vk5x3;
w = (const void*) ((uintptr_t) w + 24 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b[2] = vacc2;
b[3] = vacc3;
b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32_t vacc = *b;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = (int32_t) ((const int8_t*) w)[5];
vacc += vi5 * vk5;
w = (const void*) ((uintptr_t) w + 6 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
}
// Last pass to process up to 7 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
int32_t vacc2 = b[2];
int32_t vacc3 = b[3];
b += 4;
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
const int32_t vk0x2 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk0x3 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
const int32_t vi1x2 = (int32_t) i1[2];
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[5];
const int32_t vk1x2 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk1x3 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
const int32_t vi2x2 = (int32_t) i2[2];
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[9];
const int32_t vk2x2 = (int32_t) ((const int8_t*) w)[10];
const int32_t vk2x3 = (int32_t) ((const int8_t*) w)[11];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
const int32_t vi3x2 = (int32_t) i3[2];
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[12];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[13];
const int32_t vk3x2 = (int32_t) ((const int8_t*) w)[14];
const int32_t vk3x3 = (int32_t) ((const int8_t*) w)[15];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
const int32_t vi4x2 = (int32_t) i4[2];
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[16];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[17];
const int32_t vk4x2 = (int32_t) ((const int8_t*) w)[18];
const int32_t vk4x3 = (int32_t) ((const int8_t*) w)[19];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
const int32_t vi5x2 = (int32_t) i5[2];
const int32_t vi5x3 = (int32_t) i5[3];
i5 += 4;
const int32_t vk5x0 = (int32_t) ((const int8_t*) w)[20];
const int32_t vk5x1 = (int32_t) ((const int8_t*) w)[21];
const int32_t vk5x2 = (int32_t) ((const int8_t*) w)[22];
const int32_t vk5x3 = (int32_t) ((const int8_t*) w)[23];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
vacc2 += vi5x2 * vk5x2;
vacc3 += vi5x3 * vk5x3;
const int32_t vi6x0 = (int32_t) i6[0];
const int32_t vi6x1 = (int32_t) i6[1];
const int32_t vi6x2 = (int32_t) i6[2];
const int32_t vi6x3 = (int32_t) i6[3];
i6 += 4;
const int32_t vk6x0 = (int32_t) ((const int8_t*) w)[24];
const int32_t vk6x1 = (int32_t) ((const int8_t*) w)[25];
const int32_t vk6x2 = (int32_t) ((const int8_t*) w)[26];
const int32_t vk6x3 = (int32_t) ((const int8_t*) w)[27];
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
vacc2 += vi6x2 * vk6x2;
vacc3 += vi6x3 * vk6x3;
w = (const void*) ((uintptr_t) w + 28 * sizeof(int8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
float vfpacc2 = (float) vacc2;
float vfpacc3 = (float) vacc3;
const float vscale0 = unaligned_indexed_load_f32(w, 0);
const float vscale1 = unaligned_indexed_load_f32(w, 1);
const float vscale2 = unaligned_indexed_load_f32(w, 2);
const float vscale3 = unaligned_indexed_load_f32(w, 3);
w = (const void*) ((const float*) w + 4);
vfpacc0 *= vscale0;
vfpacc1 *= vscale1;
vfpacc2 *= vscale2;
vfpacc3 *= vscale3;
vfpacc0 = __builtin_wasm_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = __builtin_wasm_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc2 = __builtin_wasm_max_f32(vfpacc2, voutput_min_less_zero_point);
vfpacc3 = __builtin_wasm_max_f32(vfpacc3, voutput_min_less_zero_point);
vfpacc0 = __builtin_wasm_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = __builtin_wasm_min_f32(vfpacc1, voutput_max_less_zero_point);
vfpacc2 = __builtin_wasm_min_f32(vfpacc2, voutput_max_less_zero_point);
vfpacc3 = __builtin_wasm_min_f32(vfpacc3, voutput_max_less_zero_point);
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
vfpacc2 += vmagic_bias;
vfpacc3 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
int32_t vout2 = (int32_t) float_as_uint32(vfpacc2) - vmagic_bias_less_output_zero_point;
int32_t vout3 = (int32_t) float_as_uint32(vfpacc3) - vmagic_bias_less_output_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output[2] = (int8_t) vout2;
output[3] = (int8_t) vout3;
output += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32_t vacc = *b++;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = (int32_t) ((const int8_t*) w)[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6++;
const int32_t vk6 = (int32_t) ((const int8_t*) w)[6];
vacc += vi6 * vk6;
w = (const void*) ((uintptr_t) w + 7 * sizeof(int8_t));
const float vscale = unaligned_load_f32(w);
w = (const void*) ((const float*) w + 1);
float vfpacc = (float) vacc * vscale;
vfpacc = __builtin_wasm_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = __builtin_wasm_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 24,227
| 37.09434
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-6f6m7l8c8s8r-minmax-fp32-neon-mla8-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-neon-mul8.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_6f6m7l8c8s8r__neon_mla8_ld64(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 6);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->fp32_neon.magic_bias);
const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(¶ms->fp32_neon.magic_bias_less_output_zero_point);
const int8x8_t voutput_min = vld1_dup_s8(¶ms->fp32_neon.output_min);
const int8x8_t voutput_max = vld1_dup_s8(¶ms->fp32_neon.output_max);
do {
const void* w = weights;
// First pass to process 6 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi5x01234567, vk5x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
assert(c == 0);
}
// Middle pass to process 6 inputs in each iteration.
for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi5x01234567, vk5x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
assert(c == 0);
}
// Last pass to process up to 7 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi5x01234567, vk5x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
const int8x8_t vk6x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi6x01234567, vk6x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const float*) w + 4;
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const float*) w + 4;
vfpacc0123 = vmulq_f32(vfpacc0123, vscale0123);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale4567);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#endif // !XNN_ARCH_ARM64
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
vst1_s8(output, vout01234567); output += 8;
}
if XNN_UNLIKELY(c != 0) {
{
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int8x8_t vi0x01234567 = vld1_s8(i0);
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
const int8x8_t vi1x01234567 = vld1_s8(i1);
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2);
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3);
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4);
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5);
const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi5x01234567, vk5x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi6x01234567 = vld1_s8(i6);
const int8x8_t vk6x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi6x01234567, vk6x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const float*) w + 4;
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const float*) w + 4;
vfpacc0123 = vmulq_f32(vfpacc0123, vscale0123);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale4567);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
if (c & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (c & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (c & 1) {
vst1_lane_s8(output, vout01234567, 0); output += 1;
}
}
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 16,353
| 35.58613
| 123
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-6f6m7l8c8s8r-minmax-fp32-neon-mul16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-neon-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_6f6m7l8c8s8r__neon_mul16(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 6);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->fp32_neon.magic_bias);
const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(¶ms->fp32_neon.magic_bias_less_output_zero_point);
const int8x8_t voutput_min = vld1_dup_s8(¶ms->fp32_neon.output_min);
const int8x8_t voutput_max = vld1_dup_s8(¶ms->fp32_neon.output_max);
do {
const void* w = weights;
// First pass to process 6 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5)); i5 += 8;
const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
assert(c == 0);
}
// Middle pass to process 6 inputs in each iteration.
for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5)); i5 += 8;
const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
assert(c == 0);
}
// Last pass to process up to 7 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5)); i5 += 8;
const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
const int16x8_t vi6x01234567 = vmovl_s8(vld1_s8(i6)); i6 += 8;
const int16x8_t vk6x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const float*) w + 4;
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const float*) w + 4;
vfpacc0123 = vmulq_f32(vfpacc0123, vscale0123);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale4567);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#endif // !XNN_ARCH_ARM64
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
vst1_s8(output, vout01234567); output += 8;
}
if XNN_UNLIKELY(c != 0) {
{
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0));
const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1));
const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2));
const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3));
const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4));
const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5));
const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
const int16x8_t vi6x01234567 = vmovl_s8(vld1_s8(i6));
const int16x8_t vk6x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const float*) w + 4;
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const float*) w + 4;
vfpacc0123 = vmulq_f32(vfpacc0123, vscale0123);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale4567);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
if (c & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (c & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (c & 1) {
vst1_lane_s8(output, vout01234567, 0); output += 1;
}
}
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 18,172
| 41.76
| 123
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-6f6m7l8c8s8r-minmax-fp32-neon-mul8-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-neon-mul8.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_6f6m7l8c8s8r__neon_mul8_ld64(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 6);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->fp32_neon.magic_bias);
const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(¶ms->fp32_neon.magic_bias_less_output_zero_point);
const int8x8_t voutput_min = vld1_dup_s8(¶ms->fp32_neon.output_min);
const int8x8_t voutput_max = vld1_dup_s8(¶ms->fp32_neon.output_max);
do {
const void* w = weights;
// First pass to process 6 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi5x01234567, vk5x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
assert(c == 0);
}
// Middle pass to process 6 inputs in each iteration.
for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi5x01234567, vk5x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
assert(c == 0);
}
// Last pass to process up to 7 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi5x01234567, vk5x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
const int8x8_t vk6x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi6x01234567, vk6x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const float*) w + 4;
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const float*) w + 4;
vfpacc0123 = vmulq_f32(vfpacc0123, vscale0123);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale4567);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#endif // !XNN_ARCH_ARM64
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
vst1_s8(output, vout01234567); output += 8;
}
if XNN_UNLIKELY(c != 0) {
{
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int8x8_t vi0x01234567 = vld1_s8(i0);
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi1x01234567 = vld1_s8(i1);
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2);
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi3x01234567 = vld1_s8(i3);
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4);
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi5x01234567 = vld1_s8(i5);
const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi5x01234567, vk5x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi6x01234567 = vld1_s8(i6);
const int8x8_t vk6x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi6x01234567, vk6x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const float*) w + 4;
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const float*) w + 4;
vfpacc0123 = vmulq_f32(vfpacc0123, vscale0123);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale4567);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
if (c & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (c & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (c & 1) {
vst1_lane_s8(output, vout01234567, 0); output += 1;
}
}
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 17,853
| 36.906582
| 123
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-6f6m7l8c8s8r-minmax-fp32-neonv8-mla8-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-neon-mul8.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_6f6m7l8c8s8r__neonv8_mla8_ld64(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 6);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->fp32_neonv8.output_zero_point);
const int8x8_t voutput_min = vld1_dup_s8(¶ms->fp32_neonv8.output_min);
const int8x8_t voutput_max = vld1_dup_s8(¶ms->fp32_neonv8.output_max);
do {
const void* w = weights;
// First pass to process 6 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi5x01234567, vk5x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
assert(c == 0);
}
// Middle pass to process 6 inputs in each iteration.
for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi5x01234567, vk5x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
assert(c == 0);
}
// Last pass to process up to 7 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi5x01234567, vk5x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
const int8x8_t vk6x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi6x01234567, vk6x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const float*) w + 4;
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const float*) w + 4;
vfpacc0123 = vmulq_f32(vfpacc0123, vscale0123);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale4567);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#endif // !XNN_ARCH_ARM64
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
vst1_s8(output, vout01234567); output += 8;
}
if XNN_UNLIKELY(c != 0) {
{
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int8x8_t vi0x01234567 = vld1_s8(i0);
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
const int8x8_t vi1x01234567 = vld1_s8(i1);
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2);
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3);
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4);
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5);
const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi5x01234567, vk5x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi6x01234567 = vld1_s8(i6);
const int8x8_t vk6x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi6x01234567, vk6x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const float*) w + 4;
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const float*) w + 4;
vfpacc0123 = vmulq_f32(vfpacc0123, vscale0123);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale4567);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
if (c & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (c & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (c & 1) {
vst1_lane_s8(output, vout01234567, 0); output += 1;
}
}
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 16,061
| 35.175676
| 99
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-6f6m7l8c8s8r-minmax-fp32-neonv8-mul16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-neon-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_6f6m7l8c8s8r__neonv8_mul16(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 6);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->fp32_neonv8.output_zero_point);
const int8x8_t voutput_min = vld1_dup_s8(¶ms->fp32_neonv8.output_min);
const int8x8_t voutput_max = vld1_dup_s8(¶ms->fp32_neonv8.output_max);
do {
const void* w = weights;
// First pass to process 6 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5)); i5 += 8;
const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
assert(c == 0);
}
// Middle pass to process 6 inputs in each iteration.
for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5)); i5 += 8;
const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
assert(c == 0);
}
// Last pass to process up to 7 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5)); i5 += 8;
const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
const int16x8_t vi6x01234567 = vmovl_s8(vld1_s8(i6)); i6 += 8;
const int16x8_t vk6x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const float*) w + 4;
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const float*) w + 4;
vfpacc0123 = vmulq_f32(vfpacc0123, vscale0123);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale4567);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#endif // !XNN_ARCH_ARM64
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
vst1_s8(output, vout01234567); output += 8;
}
if XNN_UNLIKELY(c != 0) {
{
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0));
const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1));
const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2));
const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3));
const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4));
const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5));
const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
const int16x8_t vi6x01234567 = vmovl_s8(vld1_s8(i6));
const int16x8_t vk6x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const float*) w + 4;
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const float*) w + 4;
vfpacc0123 = vmulq_f32(vfpacc0123, vscale0123);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale4567);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
if (c & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (c & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (c & 1) {
vst1_lane_s8(output, vout01234567, 0); output += 1;
}
}
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 17,880
| 41.372038
| 99
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-6f6m7l8c8s8r-minmax-fp32-neonv8-mul8-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-neon-mul8.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_6f6m7l8c8s8r__neonv8_mul8_ld64(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 6);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->fp32_neonv8.output_zero_point);
const int8x8_t voutput_min = vld1_dup_s8(¶ms->fp32_neonv8.output_min);
const int8x8_t voutput_max = vld1_dup_s8(¶ms->fp32_neonv8.output_max);
do {
const void* w = weights;
// First pass to process 6 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi5x01234567, vk5x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
assert(c == 0);
}
// Middle pass to process 6 inputs in each iteration.
for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi5x01234567, vk5x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
assert(c == 0);
}
// Last pass to process up to 7 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi5x01234567, vk5x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
const int8x8_t vk6x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi6x01234567, vk6x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const float*) w + 4;
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const float*) w + 4;
vfpacc0123 = vmulq_f32(vfpacc0123, vscale0123);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale4567);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#endif // !XNN_ARCH_ARM64
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
vst1_s8(output, vout01234567); output += 8;
}
if XNN_UNLIKELY(c != 0) {
{
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int8x8_t vi0x01234567 = vld1_s8(i0);
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi1x01234567 = vld1_s8(i1);
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2);
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi3x01234567 = vld1_s8(i3);
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4);
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi5x01234567 = vld1_s8(i5);
const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi5x01234567, vk5x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi6x01234567 = vld1_s8(i6);
const int8x8_t vk6x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi6x01234567, vk6x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const float*) w + 4;
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const float*) w + 4;
vfpacc0123 = vmulq_f32(vfpacc0123, vscale0123);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale4567);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
if (c & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (c & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (c & 1) {
vst1_lane_s8(output, vout01234567, 0); output += 1;
}
}
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 17,561
| 36.525641
| 99
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-6f6m7l8c8s8r-minmax-fp32-sse41-mul16-add16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-sse-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_6f6m7l8c8s8r__sse41_mul16_add16(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 6);
do {
const void* w = weights;
// First pass to process 6 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
__m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(int8_t)));
const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
i0 += 8;
__m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(int8_t)));
const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
i1 += 8;
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi1x01234567, vxk1x01234567));
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(int8_t)));
const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
i2 += 8;
vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(int8_t)));
const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
i3 += 8;
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi3x01234567, vxk3x01234567));
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(int8_t)));
const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
i4 += 8;
vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(vi5x01234567);
const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(int8_t)));
const __m128i vxk5x01234567 = _mm_cvtepi8_epi16(vk5x01234567);
i5 += 8;
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi5x01234567, vxk5x01234567));
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 48 * sizeof(int8_t));
_mm_storeu_si128((__m128i*) b, vacc0123);
_mm_storeu_si128((__m128i*) (b + 4), vacc4567);
b += 8;
}
assert(c == 0);
}
// Middle pass to process 6 inputs in each iteration.
for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
__m128i vacc0123 = _mm_loadu_si128((const __m128i*) b);
__m128i vacc4567 = _mm_loadu_si128((const __m128i*) (b + 4));
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 0 * sizeof(int8_t)));
const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
i0 += 8;
__m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int8_t)));
const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
i1 += 8;
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi1x01234567, vxk1x01234567));
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int8_t)));
const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
i2 += 8;
vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int8_t)));
const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
i3 += 8;
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi3x01234567, vxk3x01234567));
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int8_t)));
const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
i4 += 8;
vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(vi5x01234567);
const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 40 * sizeof(int8_t)));
const __m128i vxk5x01234567 = _mm_cvtepi8_epi16(vk5x01234567);
i5 += 8;
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi5x01234567, vxk5x01234567));
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
w = (const void*) ((uintptr_t) w + 48 * sizeof(int8_t));
_mm_storeu_si128((__m128i*) (b), vacc0123);
_mm_storeu_si128((__m128i*) (b + 4), vacc4567);
b += 8;
}
assert(c == 0);
}
// Last pass to process up to 7 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
__m128i vacc0123 = _mm_loadu_si128((const __m128i*) b);
__m128i vacc4567 = _mm_loadu_si128((const __m128i*) (b + 4));
b += 8;
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 0 * sizeof(int8_t)));
const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
i0 += 8;
__m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int8_t)));
const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
i1 += 8;
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi1x01234567, vxk1x01234567));
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int8_t)));
const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
i2 += 8;
vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int8_t)));
const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
i3 += 8;
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi3x01234567, vxk3x01234567));
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int8_t)));
const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
i4 += 8;
vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(vi5x01234567);
const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 40 * sizeof(int8_t)));
const __m128i vxk5x01234567 = _mm_cvtepi8_epi16(vk5x01234567);
i5 += 8;
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi5x01234567, vxk5x01234567));
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(vi6x01234567);
const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 48 * sizeof(int8_t)));
const __m128i vxk6x01234567 = _mm_cvtepi8_epi16(vk6x01234567);
i6 += 8;
vprod01234567 = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
w = (const void*) ((uintptr_t) w + 56 * sizeof(int8_t));
__m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
const __m128 vscale4567 = _mm_loadu_ps((const float*) w + 4);
w = (const void*) ((const float*) w + 8);
vscaled0123 = _mm_mul_ps(vscaled0123, vscale0123);
vscaled4567 = _mm_mul_ps(vscaled4567, vscale4567);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vscaled0123);
vacc4567 = _mm_cvtps_epi32(vscaled4567);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse4.output_min);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
}
if XNN_UNLIKELY(c != 0) {
{
__m128i vacc0123 = _mm_loadu_si128((const __m128i*) b);
__m128i vacc4567 = _mm_loadu_si128((const __m128i*) (b + 4));
b += 8;
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 0 * sizeof(int8_t)));
const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
__m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int8_t)));
const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi1x01234567, vxk1x01234567));
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int8_t)));
const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int8_t)));
const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi3x01234567, vxk3x01234567));
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int8_t)));
const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(vi5x01234567);
const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 40 * sizeof(int8_t)));
const __m128i vxk5x01234567 = _mm_cvtepi8_epi16(vk5x01234567);
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi5x01234567, vxk5x01234567));
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(vi6x01234567);
const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 48 * sizeof(int8_t)));
const __m128i vxk6x01234567 = _mm_cvtepi8_epi16(vk6x01234567);
vprod01234567 = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
__m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);
const __m128 vscale0123 = _mm_loadu_ps((const float*) ((uintptr_t) w + 56 * sizeof(int8_t)));
const __m128 vscale4567 = _mm_loadu_ps((const float*) ((uintptr_t) w + 56 * sizeof(int8_t) + 4 * sizeof(float)));
vscaled0123 = _mm_mul_ps(vscaled0123, vscale0123);
vscaled4567 = _mm_mul_ps(vscaled4567, vscale4567);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vscaled0123);
vacc4567 = _mm_cvtps_epi32(vscaled4567);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (c & 4) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (c & 2) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (c & 1) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
output += 1;
}
}
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 23,497
| 41.035778
| 131
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-8f8m9l1c1s1r-minmax-fp32-scalar-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_8f8m9l1c1s1r__scalar_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 8);
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
do {
const void* w = weights;
// First pass to process 8 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = channels;
do {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6++;
const int32_t vk6 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[6];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7++;
const int32_t vk7 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[7];
vacc += vi7 * vk7;
w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 8 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
// Middle pass to process 8 inputs in each iteration.
for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = channels;
do {
int32_t vacc = *b;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = ((const int8_t*) w)[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6++;
const int32_t vk6 = ((const int8_t*) w)[6];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7++;
const int32_t vk7 = ((const int8_t*) w)[7];
vacc += vi7 * vk7;
w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
// Last pass to process up to 9 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
const int8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
}
size_t c = channels;
do {
int32_t vacc = unaligned_load_s32(b++);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = ((const int8_t*) w)[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6++;
const int32_t vk6 = ((const int8_t*) w)[6];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7++;
const int32_t vk7 = ((const int8_t*) w)[7];
vacc += vi7 * vk7;
const int32_t vi8 = (int32_t) *i8++;
const int32_t vk8 = ((const int8_t*) w)[8];
vacc += vi8 * vk8;
w = (const void*) ((uintptr_t) w + 9 * sizeof(int8_t));
const float vscale = unaligned_load_f32(w);
w = (const void*) ((const float*) w + 1);
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 10,514
| 34.40404
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-8f8m9l1c1s1r-minmax-fp32-scalar-imagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_8f8m9l1c1s1r__scalar_imagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 8);
const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
do {
const void* w = weights;
// First pass to process 8 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = channels;
do {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6++;
const int32_t vk6 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[6];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7++;
const int32_t vk7 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[7];
vacc += vi7 * vk7;
w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 8 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
// Middle pass to process 8 inputs in each iteration.
for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = channels;
do {
int32_t vacc = *b;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = ((const int8_t*) w)[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6++;
const int32_t vk6 = ((const int8_t*) w)[6];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7++;
const int32_t vk7 = ((const int8_t*) w)[7];
vacc += vi7 * vk7;
w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
// Last pass to process up to 9 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
const int8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
}
size_t c = channels;
do {
int32_t vacc = unaligned_load_s32(b++);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = ((const int8_t*) w)[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6++;
const int32_t vk6 = ((const int8_t*) w)[6];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7++;
const int32_t vk7 = ((const int8_t*) w)[7];
vacc += vi7 * vk7;
const int32_t vi8 = (int32_t) *i8++;
const int32_t vk8 = ((const int8_t*) w)[8];
vacc += vi8 * vk8;
w = (const void*) ((uintptr_t) w + 9 * sizeof(int8_t));
const float vscale = unaligned_load_f32(w);
w = (const void*) ((const float*) w + 1);
float vfpacc = (float) vacc * vscale;
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc);
vout = math_max_s32(vout, vmagic_min);
vout = math_min_s32(vout, vmagic_max);
vout -= vmagic_bias_less_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 10,402
| 33.909396
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-8f8m9l1c1s1r-minmax-fp32-scalar-lrintf.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_8f8m9l1c1s1r__scalar_lrintf(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 8);
const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
do {
const void* w = weights;
// First pass to process 8 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = channels;
do {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6++;
const int32_t vk6 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[6];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7++;
const int32_t vk7 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[7];
vacc += vi7 * vk7;
w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 8 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
// Middle pass to process 8 inputs in each iteration.
for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = channels;
do {
int32_t vacc = *b;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = ((const int8_t*) w)[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6++;
const int32_t vk6 = ((const int8_t*) w)[6];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7++;
const int32_t vk7 = ((const int8_t*) w)[7];
vacc += vi7 * vk7;
w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
// Last pass to process up to 9 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
const int8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
}
size_t c = channels;
do {
int32_t vacc = unaligned_load_s32(b++);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = ((const int8_t*) w)[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6++;
const int32_t vk6 = ((const int8_t*) w)[6];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7++;
const int32_t vk7 = ((const int8_t*) w)[7];
vacc += vi7 * vk7;
const int32_t vi8 = (int32_t) *i8++;
const int32_t vk8 = ((const int8_t*) w)[8];
vacc += vi8 * vk8;
w = (const void*) ((uintptr_t) w + 9 * sizeof(int8_t));
const float vscale = unaligned_load_f32(w);
w = (const void*) ((const float*) w + 1);
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
const int32_t vrndacc = (int32_t) lrintf(vfpacc);
int32_t vout = vrndacc + voutput_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 10,418
| 34.080808
| 98
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-8f8m9l1c1s1r-minmax-fp32-wasm-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_8f8m9l1c1s1r__wasm_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 8);
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
do {
const void* w = weights;
// First pass to process 8 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = channels;
do {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6++;
const int32_t vk6 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[6];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7++;
const int32_t vk7 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[7];
vacc += vi7 * vk7;
w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 8 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
// Middle pass to process 8 inputs in each iteration.
for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = channels;
do {
int32_t vacc = *b;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = ((const int8_t*) w)[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6++;
const int32_t vk6 = ((const int8_t*) w)[6];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7++;
const int32_t vk7 = ((const int8_t*) w)[7];
vacc += vi7 * vk7;
w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
// Last pass to process up to 9 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
const int8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
}
size_t c = channels;
do {
int32_t vacc = unaligned_load_s32(b++);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = ((const int8_t*) w)[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6++;
const int32_t vk6 = ((const int8_t*) w)[6];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7++;
const int32_t vk7 = ((const int8_t*) w)[7];
vacc += vi7 * vk7;
const int32_t vi8 = (int32_t) *i8++;
const int32_t vk8 = ((const int8_t*) w)[8];
vacc += vi8 * vk8;
w = (const void*) ((uintptr_t) w + 9 * sizeof(int8_t));
const float vscale = unaligned_load_f32(w);
w = (const void*) ((const float*) w + 1);
float vfpacc = (float) vacc * vscale;
vfpacc = __builtin_wasm_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = __builtin_wasm_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 10,532
| 34.464646
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-8f8m9l2c1s1r-minmax-fp32-scalar-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_8f8m9l2c1s1r__scalar_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 8);
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
do {
const void* w = weights;
// First pass to process 8 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10];
const int32_t vk5x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[11];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
const int32_t vi6x0 = (int32_t) i6[0];
const int32_t vi6x1 = (int32_t) i6[1];
i6 += 2;
const int32_t vk6x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12];
const int32_t vk6x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[13];
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
const int32_t vi7x0 = (int32_t) i7[0];
const int32_t vi7x1 = (int32_t) i7[1];
i7 += 2;
const int32_t vk7x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14];
const int32_t vk7x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[15];
vacc0 += vi7x0 * vk7x0;
vacc1 += vi7x1 * vk7x1;
w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 16 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5;
const int32_t vk5 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6;
const int32_t vk6 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[6];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7;
const int32_t vk7 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[7];
vacc += vi7 * vk7;
w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 8 * sizeof(int8_t));
*b++ = vacc;
}
}
// Middle pass to process 8 inputs in each iteration.
for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) ((const int8_t*) w)[10];
const int32_t vk5x1 = (int32_t) ((const int8_t*) w)[11];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
const int32_t vi6x0 = (int32_t) i6[0];
const int32_t vi6x1 = (int32_t) i6[1];
i6 += 2;
const int32_t vk6x0 = (int32_t) ((const int8_t*) w)[12];
const int32_t vk6x1 = (int32_t) ((const int8_t*) w)[13];
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
const int32_t vi7x0 = (int32_t) i7[0];
const int32_t vi7x1 = (int32_t) i7[1];
i7 += 2;
const int32_t vk7x0 = (int32_t) ((const int8_t*) w)[14];
const int32_t vk7x1 = (int32_t) ((const int8_t*) w)[15];
vacc0 += vi7x0 * vk7x0;
vacc1 += vi7x1 * vk7x1;
w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = b[0];
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5;
const int32_t vk5 = (int32_t) ((const int8_t*) w)[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6;
const int32_t vk6 = (int32_t) ((const int8_t*) w)[6];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7;
const int32_t vk7 = (int32_t) ((const int8_t*) w)[7];
vacc += vi7 * vk7;
w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
*b++ = vacc;
}
}
// Last pass to process up to 9 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
const int8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
}
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
b += 2;
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) ((const int8_t*) w)[10];
const int32_t vk5x1 = (int32_t) ((const int8_t*) w)[11];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
const int32_t vi6x0 = (int32_t) i6[0];
const int32_t vi6x1 = (int32_t) i6[1];
i6 += 2;
const int32_t vk6x0 = (int32_t) ((const int8_t*) w)[12];
const int32_t vk6x1 = (int32_t) ((const int8_t*) w)[13];
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
const int32_t vi7x0 = (int32_t) i7[0];
const int32_t vi7x1 = (int32_t) i7[1];
i7 += 2;
const int32_t vk7x0 = (int32_t) ((const int8_t*) w)[14];
const int32_t vk7x1 = (int32_t) ((const int8_t*) w)[15];
vacc0 += vi7x0 * vk7x0;
vacc1 += vi7x1 * vk7x1;
const int32_t vi8x0 = (int32_t) i8[0];
const int32_t vi8x1 = (int32_t) i8[1];
i8 += 2;
const int32_t vk8x0 = (int32_t) ((const int8_t*) w)[16];
const int32_t vk8x1 = (int32_t) ((const int8_t*) w)[17];
vacc0 += vi8x0 * vk8x0;
vacc1 += vi8x1 * vk8x1;
w = (const void*) ((uintptr_t) w + 18 * sizeof(int8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
const float vscale0 = unaligned_indexed_load_f32(w, 0);
const float vscale1 = unaligned_indexed_load_f32(w, 1);
w = (const void*) ((const float*) w + 2);
vfpacc0 *= vscale0;
vfpacc1 *= vscale1;
vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = b[0];
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5;
const int32_t vk5 = (int32_t) ((const int8_t*) w)[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6;
const int32_t vk6 = (int32_t) ((const int8_t*) w)[6];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7;
const int32_t vk7 = (int32_t) ((const int8_t*) w)[7];
vacc += vi7 * vk7;
const int32_t vi8 = (int32_t) *i8;
const int32_t vk8 = (int32_t) ((const int8_t*) w)[8];
vacc += vi8 * vk8;
const float vscale = unaligned_load_f32((const void*) ((uintptr_t) w + 9 * sizeof(int8_t)));
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 20,584
| 33.480737
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-8f8m9l2c1s1r-minmax-fp32-scalar-imagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_8f8m9l2c1s1r__scalar_imagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 8);
const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
do {
const void* w = weights;
// First pass to process 8 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10];
const int32_t vk5x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[11];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
const int32_t vi6x0 = (int32_t) i6[0];
const int32_t vi6x1 = (int32_t) i6[1];
i6 += 2;
const int32_t vk6x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12];
const int32_t vk6x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[13];
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
const int32_t vi7x0 = (int32_t) i7[0];
const int32_t vi7x1 = (int32_t) i7[1];
i7 += 2;
const int32_t vk7x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14];
const int32_t vk7x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[15];
vacc0 += vi7x0 * vk7x0;
vacc1 += vi7x1 * vk7x1;
w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 16 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5;
const int32_t vk5 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6;
const int32_t vk6 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[6];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7;
const int32_t vk7 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[7];
vacc += vi7 * vk7;
w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 8 * sizeof(int8_t));
*b++ = vacc;
}
}
// Middle pass to process 8 inputs in each iteration.
for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) ((const int8_t*) w)[10];
const int32_t vk5x1 = (int32_t) ((const int8_t*) w)[11];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
const int32_t vi6x0 = (int32_t) i6[0];
const int32_t vi6x1 = (int32_t) i6[1];
i6 += 2;
const int32_t vk6x0 = (int32_t) ((const int8_t*) w)[12];
const int32_t vk6x1 = (int32_t) ((const int8_t*) w)[13];
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
const int32_t vi7x0 = (int32_t) i7[0];
const int32_t vi7x1 = (int32_t) i7[1];
i7 += 2;
const int32_t vk7x0 = (int32_t) ((const int8_t*) w)[14];
const int32_t vk7x1 = (int32_t) ((const int8_t*) w)[15];
vacc0 += vi7x0 * vk7x0;
vacc1 += vi7x1 * vk7x1;
w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = b[0];
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5;
const int32_t vk5 = (int32_t) ((const int8_t*) w)[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6;
const int32_t vk6 = (int32_t) ((const int8_t*) w)[6];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7;
const int32_t vk7 = (int32_t) ((const int8_t*) w)[7];
vacc += vi7 * vk7;
w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
*b++ = vacc;
}
}
// Last pass to process up to 9 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
const int8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
}
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
b += 2;
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) ((const int8_t*) w)[10];
const int32_t vk5x1 = (int32_t) ((const int8_t*) w)[11];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
const int32_t vi6x0 = (int32_t) i6[0];
const int32_t vi6x1 = (int32_t) i6[1];
i6 += 2;
const int32_t vk6x0 = (int32_t) ((const int8_t*) w)[12];
const int32_t vk6x1 = (int32_t) ((const int8_t*) w)[13];
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
const int32_t vi7x0 = (int32_t) i7[0];
const int32_t vi7x1 = (int32_t) i7[1];
i7 += 2;
const int32_t vk7x0 = (int32_t) ((const int8_t*) w)[14];
const int32_t vk7x1 = (int32_t) ((const int8_t*) w)[15];
vacc0 += vi7x0 * vk7x0;
vacc1 += vi7x1 * vk7x1;
const int32_t vi8x0 = (int32_t) i8[0];
const int32_t vi8x1 = (int32_t) i8[1];
i8 += 2;
const int32_t vk8x0 = (int32_t) ((const int8_t*) w)[16];
const int32_t vk8x1 = (int32_t) ((const int8_t*) w)[17];
vacc0 += vi8x0 * vk8x0;
vacc1 += vi8x1 * vk8x1;
w = (const void*) ((uintptr_t) w + 18 * sizeof(int8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
const float vscale0 = unaligned_indexed_load_f32(w, 0);
const float vscale1 = unaligned_indexed_load_f32(w, 1);
w = (const void*) ((const float*) w + 2);
vfpacc0 *= vscale0;
vfpacc1 *= vscale1;
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0);
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1);
vout0 = math_max_s32(vout0, vmagic_min);
vout1 = math_max_s32(vout1, vmagic_min);
vout0 = math_min_s32(vout0, vmagic_max);
vout1 = math_min_s32(vout1, vmagic_max);
vout0 -= vmagic_bias_less_zero_point;
vout1 -= vmagic_bias_less_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = b[0];
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5;
const int32_t vk5 = (int32_t) ((const int8_t*) w)[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6;
const int32_t vk6 = (int32_t) ((const int8_t*) w)[6];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7;
const int32_t vk7 = (int32_t) ((const int8_t*) w)[7];
vacc += vi7 * vk7;
const int32_t vi8 = (int32_t) *i8;
const int32_t vk8 = (int32_t) ((const int8_t*) w)[8];
vacc += vi8 * vk8;
const float vscale = unaligned_load_f32((const void*) ((uintptr_t) w + 9 * sizeof(int8_t)));
float vfpacc = (float) vacc * vscale;
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc);
vout = math_max_s32(vout, vmagic_min);
vout = math_min_s32(vout, vmagic_max);
vout -= vmagic_bias_less_zero_point;
*output++ = (int8_t) vout;
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 20,407
| 32.956739
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-8f8m9l2c1s1r-minmax-fp32-scalar-lrintf.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_8f8m9l2c1s1r__scalar_lrintf(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 8);
const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
do {
const void* w = weights;
// First pass to process 8 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10];
const int32_t vk5x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[11];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
const int32_t vi6x0 = (int32_t) i6[0];
const int32_t vi6x1 = (int32_t) i6[1];
i6 += 2;
const int32_t vk6x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12];
const int32_t vk6x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[13];
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
const int32_t vi7x0 = (int32_t) i7[0];
const int32_t vi7x1 = (int32_t) i7[1];
i7 += 2;
const int32_t vk7x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14];
const int32_t vk7x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[15];
vacc0 += vi7x0 * vk7x0;
vacc1 += vi7x1 * vk7x1;
w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 16 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5;
const int32_t vk5 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6;
const int32_t vk6 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[6];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7;
const int32_t vk7 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[7];
vacc += vi7 * vk7;
w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 8 * sizeof(int8_t));
*b++ = vacc;
}
}
// Middle pass to process 8 inputs in each iteration.
for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) ((const int8_t*) w)[10];
const int32_t vk5x1 = (int32_t) ((const int8_t*) w)[11];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
const int32_t vi6x0 = (int32_t) i6[0];
const int32_t vi6x1 = (int32_t) i6[1];
i6 += 2;
const int32_t vk6x0 = (int32_t) ((const int8_t*) w)[12];
const int32_t vk6x1 = (int32_t) ((const int8_t*) w)[13];
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
const int32_t vi7x0 = (int32_t) i7[0];
const int32_t vi7x1 = (int32_t) i7[1];
i7 += 2;
const int32_t vk7x0 = (int32_t) ((const int8_t*) w)[14];
const int32_t vk7x1 = (int32_t) ((const int8_t*) w)[15];
vacc0 += vi7x0 * vk7x0;
vacc1 += vi7x1 * vk7x1;
w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = b[0];
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5;
const int32_t vk5 = (int32_t) ((const int8_t*) w)[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6;
const int32_t vk6 = (int32_t) ((const int8_t*) w)[6];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7;
const int32_t vk7 = (int32_t) ((const int8_t*) w)[7];
vacc += vi7 * vk7;
w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
*b++ = vacc;
}
}
// Last pass to process up to 9 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
const int8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
}
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
b += 2;
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) ((const int8_t*) w)[10];
const int32_t vk5x1 = (int32_t) ((const int8_t*) w)[11];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
const int32_t vi6x0 = (int32_t) i6[0];
const int32_t vi6x1 = (int32_t) i6[1];
i6 += 2;
const int32_t vk6x0 = (int32_t) ((const int8_t*) w)[12];
const int32_t vk6x1 = (int32_t) ((const int8_t*) w)[13];
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
const int32_t vi7x0 = (int32_t) i7[0];
const int32_t vi7x1 = (int32_t) i7[1];
i7 += 2;
const int32_t vk7x0 = (int32_t) ((const int8_t*) w)[14];
const int32_t vk7x1 = (int32_t) ((const int8_t*) w)[15];
vacc0 += vi7x0 * vk7x0;
vacc1 += vi7x1 * vk7x1;
const int32_t vi8x0 = (int32_t) i8[0];
const int32_t vi8x1 = (int32_t) i8[1];
i8 += 2;
const int32_t vk8x0 = (int32_t) ((const int8_t*) w)[16];
const int32_t vk8x1 = (int32_t) ((const int8_t*) w)[17];
vacc0 += vi8x0 * vk8x0;
vacc1 += vi8x1 * vk8x1;
w = (const void*) ((uintptr_t) w + 18 * sizeof(int8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
const float vscale0 = unaligned_indexed_load_f32(w, 0);
const float vscale1 = unaligned_indexed_load_f32(w, 1);
w = (const void*) ((const float*) w + 2);
vfpacc0 *= vscale0;
vfpacc1 *= vscale1;
vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
const int32_t vrndacc0 = (int32_t) lrintf(vfpacc0);
const int32_t vrndacc1 = (int32_t) lrintf(vfpacc1);
int32_t vout0 = (int32_t) vrndacc0 + voutput_zero_point;
int32_t vout1 = (int32_t) vrndacc1 + voutput_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = b[0];
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5;
const int32_t vk5 = (int32_t) ((const int8_t*) w)[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6;
const int32_t vk6 = (int32_t) ((const int8_t*) w)[6];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7;
const int32_t vk7 = (int32_t) ((const int8_t*) w)[7];
vacc += vi7 * vk7;
const int32_t vi8 = (int32_t) *i8;
const int32_t vk8 = (int32_t) ((const int8_t*) w)[8];
vacc += vi8 * vk8;
const float vscale = unaligned_load_f32((const void*) ((uintptr_t) w + 9 * sizeof(int8_t)));
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
const int32_t vrndacc = (int32_t) lrintf(vfpacc);
int32_t vout = vrndacc + voutput_zero_point;
*output++ = (int8_t) vout;
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 20,480
| 33.306533
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-8f8m9l2c1s1r-minmax-fp32-wasm-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_8f8m9l2c1s1r__wasm_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 8);
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
do {
const void* w = weights;
// First pass to process 8 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10];
const int32_t vk5x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[11];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
const int32_t vi6x0 = (int32_t) i6[0];
const int32_t vi6x1 = (int32_t) i6[1];
i6 += 2;
const int32_t vk6x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12];
const int32_t vk6x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[13];
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
const int32_t vi7x0 = (int32_t) i7[0];
const int32_t vi7x1 = (int32_t) i7[1];
i7 += 2;
const int32_t vk7x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14];
const int32_t vk7x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[15];
vacc0 += vi7x0 * vk7x0;
vacc1 += vi7x1 * vk7x1;
w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 16 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5;
const int32_t vk5 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6;
const int32_t vk6 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[6];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7;
const int32_t vk7 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[7];
vacc += vi7 * vk7;
w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 8 * sizeof(int8_t));
*b++ = vacc;
}
}
// Middle pass to process 8 inputs in each iteration.
for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) ((const int8_t*) w)[10];
const int32_t vk5x1 = (int32_t) ((const int8_t*) w)[11];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
const int32_t vi6x0 = (int32_t) i6[0];
const int32_t vi6x1 = (int32_t) i6[1];
i6 += 2;
const int32_t vk6x0 = (int32_t) ((const int8_t*) w)[12];
const int32_t vk6x1 = (int32_t) ((const int8_t*) w)[13];
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
const int32_t vi7x0 = (int32_t) i7[0];
const int32_t vi7x1 = (int32_t) i7[1];
i7 += 2;
const int32_t vk7x0 = (int32_t) ((const int8_t*) w)[14];
const int32_t vk7x1 = (int32_t) ((const int8_t*) w)[15];
vacc0 += vi7x0 * vk7x0;
vacc1 += vi7x1 * vk7x1;
w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = b[0];
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5;
const int32_t vk5 = (int32_t) ((const int8_t*) w)[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6;
const int32_t vk6 = (int32_t) ((const int8_t*) w)[6];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7;
const int32_t vk7 = (int32_t) ((const int8_t*) w)[7];
vacc += vi7 * vk7;
w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
*b++ = vacc;
}
}
// Last pass to process up to 9 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
const int8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
}
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
b += 2;
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) ((const int8_t*) w)[10];
const int32_t vk5x1 = (int32_t) ((const int8_t*) w)[11];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
const int32_t vi6x0 = (int32_t) i6[0];
const int32_t vi6x1 = (int32_t) i6[1];
i6 += 2;
const int32_t vk6x0 = (int32_t) ((const int8_t*) w)[12];
const int32_t vk6x1 = (int32_t) ((const int8_t*) w)[13];
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
const int32_t vi7x0 = (int32_t) i7[0];
const int32_t vi7x1 = (int32_t) i7[1];
i7 += 2;
const int32_t vk7x0 = (int32_t) ((const int8_t*) w)[14];
const int32_t vk7x1 = (int32_t) ((const int8_t*) w)[15];
vacc0 += vi7x0 * vk7x0;
vacc1 += vi7x1 * vk7x1;
const int32_t vi8x0 = (int32_t) i8[0];
const int32_t vi8x1 = (int32_t) i8[1];
i8 += 2;
const int32_t vk8x0 = (int32_t) ((const int8_t*) w)[16];
const int32_t vk8x1 = (int32_t) ((const int8_t*) w)[17];
vacc0 += vi8x0 * vk8x0;
vacc1 += vi8x1 * vk8x1;
w = (const void*) ((uintptr_t) w + 18 * sizeof(int8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
const float vscale0 = unaligned_indexed_load_f32(w, 0);
const float vscale1 = unaligned_indexed_load_f32(w, 1);
w = (const void*) ((const float*) w + 2);
vfpacc0 *= vscale0;
vfpacc1 *= vscale1;
vfpacc0 = __builtin_wasm_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = __builtin_wasm_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc0 = __builtin_wasm_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = __builtin_wasm_min_f32(vfpacc1, voutput_max_less_zero_point);
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = b[0];
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5;
const int32_t vk5 = (int32_t) ((const int8_t*) w)[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6;
const int32_t vk6 = (int32_t) ((const int8_t*) w)[6];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7;
const int32_t vk7 = (int32_t) ((const int8_t*) w)[7];
vacc += vi7 * vk7;
const int32_t vi8 = (int32_t) *i8;
const int32_t vk8 = (int32_t) ((const int8_t*) w)[8];
vacc += vi8 * vk8;
const float vscale = unaligned_load_f32((const void*) ((uintptr_t) w + 9 * sizeof(int8_t)));
float vfpacc = (float) vacc * vscale;
vfpacc = __builtin_wasm_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = __builtin_wasm_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 20,642
| 33.577889
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-8f8m9l8c8s8r-minmax-fp32-neon-mla8-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-neon-mul8.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_8f8m9l8c8s8r__neon_mla8_ld64(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 8);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->fp32_neon.magic_bias);
const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(¶ms->fp32_neon.magic_bias_less_output_zero_point);
const int8x8_t voutput_min = vld1_dup_s8(¶ms->fp32_neon.output_min);
const int8x8_t voutput_max = vld1_dup_s8(¶ms->fp32_neon.output_max);
do {
const void* w = weights;
// First pass to process 8 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi5x01234567, vk5x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
const int8x8_t vk6x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi6x01234567, vk6x01234567);
const int8x8_t vi7x01234567 = vld1_s8(i7); i7 += 8;
const int8x8_t vk7x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi7x01234567, vk7x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
assert(c == 0);
}
// Middle pass to process 8 inputs in each iteration.
for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi5x01234567, vk5x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
const int8x8_t vk6x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi6x01234567, vk6x01234567);
const int8x8_t vi7x01234567 = vld1_s8(i7); i7 += 8;
const int8x8_t vk7x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi7x01234567, vk7x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
assert(c == 0);
}
// Last pass to process up to 9 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
const int8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi5x01234567, vk5x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
const int8x8_t vk6x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi6x01234567, vk6x01234567);
const int8x8_t vi7x01234567 = vld1_s8(i7); i7 += 8;
const int8x8_t vk7x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi7x01234567, vk7x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi8x01234567 = vld1_s8(i8); i8 += 8;
const int8x8_t vk8x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi8x01234567, vk8x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const float*) w + 4;
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const float*) w + 4;
vfpacc0123 = vmulq_f32(vfpacc0123, vscale0123);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale4567);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#endif // !XNN_ARCH_ARM64
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
vst1_s8(output, vout01234567); output += 8;
}
if XNN_UNLIKELY(c != 0) {
{
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int8x8_t vi0x01234567 = vld1_s8(i0);
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
const int8x8_t vi1x01234567 = vld1_s8(i1);
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2);
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3);
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4);
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5);
const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi5x01234567, vk5x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi6x01234567 = vld1_s8(i6);
const int8x8_t vk6x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi6x01234567, vk6x01234567);
const int8x8_t vi7x01234567 = vld1_s8(i7);
const int8x8_t vk7x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi7x01234567, vk7x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi8x01234567 = vld1_s8(i8);
const int8x8_t vk8x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi8x01234567, vk8x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const float*) w + 4;
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const float*) w + 4;
vfpacc0123 = vmulq_f32(vfpacc0123, vscale0123);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale4567);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
if (c & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (c & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (c & 1) {
vst1_lane_s8(output, vout01234567, 0); output += 1;
}
}
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 19,613
| 35.937853
| 123
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-8f8m9l8c8s8r-minmax-fp32-neon-mul16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-neon-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_8f8m9l8c8s8r__neon_mul16(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 8);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->fp32_neon.magic_bias);
const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(¶ms->fp32_neon.magic_bias_less_output_zero_point);
const int8x8_t voutput_min = vld1_dup_s8(¶ms->fp32_neon.output_min);
const int8x8_t voutput_max = vld1_dup_s8(¶ms->fp32_neon.output_max);
do {
const void* w = weights;
// First pass to process 8 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5)); i5 += 8;
const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
const int16x8_t vi6x01234567 = vmovl_s8(vld1_s8(i6)); i6 += 8;
const int16x8_t vk6x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
const int16x8_t vi7x01234567 = vmovl_s8(vld1_s8(i7)); i7 += 8;
const int16x8_t vk7x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
assert(c == 0);
}
// Middle pass to process 8 inputs in each iteration.
for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5)); i5 += 8;
const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
const int16x8_t vi6x01234567 = vmovl_s8(vld1_s8(i6)); i6 += 8;
const int16x8_t vk6x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
const int16x8_t vi7x01234567 = vmovl_s8(vld1_s8(i7)); i7 += 8;
const int16x8_t vk7x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
assert(c == 0);
}
// Last pass to process up to 9 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
const int8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5)); i5 += 8;
const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
const int16x8_t vi6x01234567 = vmovl_s8(vld1_s8(i6)); i6 += 8;
const int16x8_t vk6x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
const int16x8_t vi7x01234567 = vmovl_s8(vld1_s8(i7)); i7 += 8;
const int16x8_t vk7x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));
const int16x8_t vi8x01234567 = vmovl_s8(vld1_s8(i8)); i8 += 8;
const int16x8_t vk8x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi8x01234567), vget_low_s16(vk8x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi8x01234567), vget_high_s16(vk8x01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const float*) w + 4;
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const float*) w + 4;
vfpacc0123 = vmulq_f32(vfpacc0123, vscale0123);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale4567);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#endif // !XNN_ARCH_ARM64
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
vst1_s8(output, vout01234567); output += 8;
}
if XNN_UNLIKELY(c != 0) {
{
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0));
const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1));
const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2));
const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3));
const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4));
const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5));
const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
const int16x8_t vi6x01234567 = vmovl_s8(vld1_s8(i6));
const int16x8_t vk6x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
const int16x8_t vi7x01234567 = vmovl_s8(vld1_s8(i7));
const int16x8_t vk7x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));
const int16x8_t vi8x01234567 = vmovl_s8(vld1_s8(i8));
const int16x8_t vk8x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi8x01234567), vget_low_s16(vk8x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi8x01234567), vget_high_s16(vk8x01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const float*) w + 4;
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const float*) w + 4;
vfpacc0123 = vmulq_f32(vfpacc0123, vscale0123);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale4567);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
if (c & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (c & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (c & 1) {
vst1_lane_s8(output, vout01234567, 0); output += 1;
}
}
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 22,042
| 42.823062
| 123
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-8f8m9l8c8s8r-minmax-fp32-neon-mul8-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-neon-mul8.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_8f8m9l8c8s8r__neon_mul8_ld64(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 8);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->fp32_neon.magic_bias);
const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(¶ms->fp32_neon.magic_bias_less_output_zero_point);
const int8x8_t voutput_min = vld1_dup_s8(¶ms->fp32_neon.output_min);
const int8x8_t voutput_max = vld1_dup_s8(¶ms->fp32_neon.output_max);
do {
const void* w = weights;
// First pass to process 8 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi5x01234567, vk5x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
const int8x8_t vk6x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi6x01234567, vk6x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi7x01234567 = vld1_s8(i7); i7 += 8;
const int8x8_t vk7x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi7x01234567, vk7x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
assert(c == 0);
}
// Middle pass to process 8 inputs in each iteration.
for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi5x01234567, vk5x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
const int8x8_t vk6x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi6x01234567, vk6x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi7x01234567 = vld1_s8(i7); i7 += 8;
const int8x8_t vk7x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi7x01234567, vk7x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
assert(c == 0);
}
// Last pass to process up to 9 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
const int8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi5x01234567, vk5x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
const int8x8_t vk6x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi6x01234567, vk6x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi7x01234567 = vld1_s8(i7); i7 += 8;
const int8x8_t vk7x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi7x01234567, vk7x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi8x01234567 = vld1_s8(i8); i8 += 8;
const int8x8_t vk8x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi8x01234567, vk8x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const float*) w + 4;
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const float*) w + 4;
vfpacc0123 = vmulq_f32(vfpacc0123, vscale0123);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale4567);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#endif // !XNN_ARCH_ARM64
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
vst1_s8(output, vout01234567); output += 8;
}
if XNN_UNLIKELY(c != 0) {
{
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int8x8_t vi0x01234567 = vld1_s8(i0);
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi1x01234567 = vld1_s8(i1);
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2);
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi3x01234567 = vld1_s8(i3);
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4);
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi5x01234567 = vld1_s8(i5);
const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi5x01234567, vk5x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi6x01234567 = vld1_s8(i6);
const int8x8_t vk6x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi6x01234567, vk6x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi7x01234567 = vld1_s8(i7);
const int8x8_t vk7x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi7x01234567, vk7x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi8x01234567 = vld1_s8(i8);
const int8x8_t vk8x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi8x01234567, vk8x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const float*) w + 4;
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const float*) w + 4;
vfpacc0123 = vmulq_f32(vfpacc0123, vscale0123);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale4567);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
if (c & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (c & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (c & 1) {
vst1_lane_s8(output, vout01234567, 0); output += 1;
}
}
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 21,613
| 37.390764
| 123
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-8f8m9l8c8s8r-minmax-fp32-neonv8-mla8-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-neon-mul8.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_8f8m9l8c8s8r__neonv8_mla8_ld64(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 8);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->fp32_neonv8.output_zero_point);
const int8x8_t voutput_min = vld1_dup_s8(¶ms->fp32_neonv8.output_min);
const int8x8_t voutput_max = vld1_dup_s8(¶ms->fp32_neonv8.output_max);
do {
const void* w = weights;
// First pass to process 8 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi5x01234567, vk5x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
const int8x8_t vk6x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi6x01234567, vk6x01234567);
const int8x8_t vi7x01234567 = vld1_s8(i7); i7 += 8;
const int8x8_t vk7x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi7x01234567, vk7x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
assert(c == 0);
}
// Middle pass to process 8 inputs in each iteration.
for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi5x01234567, vk5x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
const int8x8_t vk6x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi6x01234567, vk6x01234567);
const int8x8_t vi7x01234567 = vld1_s8(i7); i7 += 8;
const int8x8_t vk7x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi7x01234567, vk7x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
assert(c == 0);
}
// Last pass to process up to 9 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
const int8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi5x01234567, vk5x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
const int8x8_t vk6x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi6x01234567, vk6x01234567);
const int8x8_t vi7x01234567 = vld1_s8(i7); i7 += 8;
const int8x8_t vk7x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi7x01234567, vk7x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi8x01234567 = vld1_s8(i8); i8 += 8;
const int8x8_t vk8x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi8x01234567, vk8x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const float*) w + 4;
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const float*) w + 4;
vfpacc0123 = vmulq_f32(vfpacc0123, vscale0123);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale4567);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#endif // !XNN_ARCH_ARM64
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
vst1_s8(output, vout01234567); output += 8;
}
if XNN_UNLIKELY(c != 0) {
{
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int8x8_t vi0x01234567 = vld1_s8(i0);
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
const int8x8_t vi1x01234567 = vld1_s8(i1);
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2);
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3);
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4);
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5);
const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi5x01234567, vk5x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi6x01234567 = vld1_s8(i6);
const int8x8_t vk6x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi6x01234567, vk6x01234567);
const int8x8_t vi7x01234567 = vld1_s8(i7);
const int8x8_t vk7x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi7x01234567, vk7x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi8x01234567 = vld1_s8(i8);
const int8x8_t vk8x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi8x01234567, vk8x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const float*) w + 4;
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const float*) w + 4;
vfpacc0123 = vmulq_f32(vfpacc0123, vscale0123);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale4567);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
if (c & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (c & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (c & 1) {
vst1_lane_s8(output, vout01234567, 0); output += 1;
}
}
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 19,321
| 35.594697
| 99
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-8f8m9l8c8s8r-minmax-fp32-neonv8-mul16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-neon-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_8f8m9l8c8s8r__neonv8_mul16(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 8);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->fp32_neonv8.output_zero_point);
const int8x8_t voutput_min = vld1_dup_s8(¶ms->fp32_neonv8.output_min);
const int8x8_t voutput_max = vld1_dup_s8(¶ms->fp32_neonv8.output_max);
do {
const void* w = weights;
// First pass to process 8 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5)); i5 += 8;
const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
const int16x8_t vi6x01234567 = vmovl_s8(vld1_s8(i6)); i6 += 8;
const int16x8_t vk6x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
const int16x8_t vi7x01234567 = vmovl_s8(vld1_s8(i7)); i7 += 8;
const int16x8_t vk7x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
assert(c == 0);
}
// Middle pass to process 8 inputs in each iteration.
for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5)); i5 += 8;
const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
const int16x8_t vi6x01234567 = vmovl_s8(vld1_s8(i6)); i6 += 8;
const int16x8_t vk6x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
const int16x8_t vi7x01234567 = vmovl_s8(vld1_s8(i7)); i7 += 8;
const int16x8_t vk7x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
assert(c == 0);
}
// Last pass to process up to 9 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
const int8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5)); i5 += 8;
const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
const int16x8_t vi6x01234567 = vmovl_s8(vld1_s8(i6)); i6 += 8;
const int16x8_t vk6x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
const int16x8_t vi7x01234567 = vmovl_s8(vld1_s8(i7)); i7 += 8;
const int16x8_t vk7x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));
const int16x8_t vi8x01234567 = vmovl_s8(vld1_s8(i8)); i8 += 8;
const int16x8_t vk8x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi8x01234567), vget_low_s16(vk8x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi8x01234567), vget_high_s16(vk8x01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const float*) w + 4;
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const float*) w + 4;
vfpacc0123 = vmulq_f32(vfpacc0123, vscale0123);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale4567);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#endif // !XNN_ARCH_ARM64
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
vst1_s8(output, vout01234567); output += 8;
}
if XNN_UNLIKELY(c != 0) {
{
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0));
const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1));
const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2));
const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3));
const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4));
const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5));
const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
const int16x8_t vi6x01234567 = vmovl_s8(vld1_s8(i6));
const int16x8_t vk6x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
const int16x8_t vi7x01234567 = vmovl_s8(vld1_s8(i7));
const int16x8_t vk7x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));
const int16x8_t vi8x01234567 = vmovl_s8(vld1_s8(i8));
const int16x8_t vk8x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi8x01234567), vget_low_s16(vk8x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi8x01234567), vget_high_s16(vk8x01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const float*) w + 4;
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const float*) w + 4;
vfpacc0123 = vmulq_f32(vfpacc0123, vscale0123);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale4567);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
if (c & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (c & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (c & 1) {
vst1_lane_s8(output, vout01234567, 0); output += 1;
}
}
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 21,750
| 42.502
| 99
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-8f8m9l8c8s8r-minmax-fp32-neonv8-mul8-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-neon-mul8.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_8f8m9l8c8s8r__neonv8_mul8_ld64(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 8);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->fp32_neonv8.output_zero_point);
const int8x8_t voutput_min = vld1_dup_s8(¶ms->fp32_neonv8.output_min);
const int8x8_t voutput_max = vld1_dup_s8(¶ms->fp32_neonv8.output_max);
do {
const void* w = weights;
// First pass to process 8 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi5x01234567, vk5x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
const int8x8_t vk6x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi6x01234567, vk6x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi7x01234567 = vld1_s8(i7); i7 += 8;
const int8x8_t vk7x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi7x01234567, vk7x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
assert(c == 0);
}
// Middle pass to process 8 inputs in each iteration.
for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi5x01234567, vk5x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
const int8x8_t vk6x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi6x01234567, vk6x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi7x01234567 = vld1_s8(i7); i7 += 8;
const int8x8_t vk7x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi7x01234567, vk7x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
assert(c == 0);
}
// Last pass to process up to 9 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
const int8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi5x01234567, vk5x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
const int8x8_t vk6x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi6x01234567, vk6x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi7x01234567 = vld1_s8(i7); i7 += 8;
const int8x8_t vk7x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi7x01234567, vk7x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi8x01234567 = vld1_s8(i8); i8 += 8;
const int8x8_t vk8x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi8x01234567, vk8x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const float*) w + 4;
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const float*) w + 4;
vfpacc0123 = vmulq_f32(vfpacc0123, vscale0123);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale4567);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#endif // !XNN_ARCH_ARM64
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
vst1_s8(output, vout01234567); output += 8;
}
if XNN_UNLIKELY(c != 0) {
{
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int8x8_t vi0x01234567 = vld1_s8(i0);
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi1x01234567 = vld1_s8(i1);
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2);
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi3x01234567 = vld1_s8(i3);
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4);
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi5x01234567 = vld1_s8(i5);
const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi5x01234567, vk5x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi6x01234567 = vld1_s8(i6);
const int8x8_t vk6x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi6x01234567, vk6x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi7x01234567 = vld1_s8(i7);
const int8x8_t vk7x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi7x01234567, vk7x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi8x01234567 = vld1_s8(i8);
const int8x8_t vk8x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi8x01234567, vk8x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const float*) w + 4;
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const float*) w + 4;
vfpacc0123 = vmulq_f32(vfpacc0123, vscale0123);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale4567);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
if (c & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (c & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (c & 1) {
vst1_lane_s8(output, vout01234567, 0); output += 1;
}
}
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 21,321
| 37.075
| 99
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-9p1c-minmax-fp32-scalar-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_9p1c__scalar_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
const int8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
size_t c = channels;
const void* w = weights;
do {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6++;
const int32_t vk6 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[6];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7++;
const int32_t vk7 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[7];
vacc += vi7 * vk7;
const int32_t vi8 = (int32_t) *i8++;
const int32_t vk8 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[8];
vacc += vi8 * vk8;
w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 9 * sizeof(int8_t));
const float vscale = unaligned_load_f32(w);
w = (const void*) ((const float*) w + 1);
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 4,828
| 35.037313
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-9p1c-minmax-fp32-scalar-imagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_9p1c__scalar_imagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
const int8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
size_t c = channels;
const void* w = weights;
do {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6++;
const int32_t vk6 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[6];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7++;
const int32_t vk7 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[7];
vacc += vi7 * vk7;
const int32_t vi8 = (int32_t) *i8++;
const int32_t vk8 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[8];
vacc += vi8 * vk8;
w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 9 * sizeof(int8_t));
const float vscale = unaligned_load_f32(w);
w = (const void*) ((const float*) w + 1);
float vfpacc = (float) vacc * vscale;
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc);
vout = math_max_s32(vout, vmagic_min);
vout = math_min_s32(vout, vmagic_max);
vout -= vmagic_bias_less_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 4,714
| 33.925926
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-9p1c-minmax-fp32-scalar-lrintf.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_9p1c__scalar_lrintf(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
const int8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
size_t c = channels;
const void* w = weights;
do {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6++;
const int32_t vk6 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[6];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7++;
const int32_t vk7 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[7];
vacc += vi7 * vk7;
const int32_t vi8 = (int32_t) *i8++;
const int32_t vk8 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[8];
vacc += vi8 * vk8;
w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 9 * sizeof(int8_t));
const float vscale = unaligned_load_f32(w);
w = (const void*) ((const float*) w + 1);
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
const int32_t vrndacc = (int32_t) lrintf(vfpacc);
int32_t vout = vrndacc + voutput_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 4,732
| 34.320896
| 98
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-9p1c-minmax-fp32-wasm-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_9p1c__wasm_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
const int8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
size_t c = channels;
const void* w = weights;
do {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6++;
const int32_t vk6 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[6];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7++;
const int32_t vk7 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[7];
vacc += vi7 * vk7;
const int32_t vi8 = (int32_t) *i8++;
const int32_t vk8 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[8];
vacc += vi8 * vk8;
w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 9 * sizeof(int8_t));
const float vscale = unaligned_load_f32(w);
w = (const void*) ((const float*) w + 1);
float vfpacc = (float) vacc * vscale;
vfpacc = __builtin_wasm_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = __builtin_wasm_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 4,846
| 35.171642
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-9p2c-minmax-fp32-scalar-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_9p2c__scalar_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
const int8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
size_t c = channels;
const void* w = weights;
for (; c >= 2; c -= 2) {
int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10];
const int32_t vk5x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[11];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
const int32_t vi6x0 = (int32_t) i6[0];
const int32_t vi6x1 = (int32_t) i6[1];
i6 += 2;
const int32_t vk6x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12];
const int32_t vk6x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[13];
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
const int32_t vi7x0 = (int32_t) i7[0];
const int32_t vi7x1 = (int32_t) i7[1];
i7 += 2;
const int32_t vk7x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14];
const int32_t vk7x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[15];
vacc0 += vi7x0 * vk7x0;
vacc1 += vi7x1 * vk7x1;
const int32_t vi8x0 = (int32_t) i8[0];
const int32_t vi8x1 = (int32_t) i8[1];
i8 += 2;
const int32_t vk8x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[16];
const int32_t vk8x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[17];
vacc0 += vi8x0 * vk8x0;
vacc1 += vi8x1 * vk8x1;
w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 18 * sizeof(int8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
const float vscale0 = unaligned_indexed_load_f32(w, 0);
const float vscale1 = unaligned_indexed_load_f32(w, 1);
w = (const void*) ((const float*) w + 2);
vfpacc0 *= vscale0;
vfpacc1 *= vscale1;
vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5;
const int32_t vk5 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6;
const int32_t vk6 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7;
const int32_t vk7 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14];
vacc += vi7 * vk7;
const int32_t vi8 = (int32_t) *i8;
const int32_t vk8 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[16];
vacc += vi8 * vk8;
const float vscale = unaligned_load_f32((const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 18 * sizeof(int8_t)));
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
}
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 9,319
| 35.692913
| 121
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-9p2c-minmax-fp32-scalar-imagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_9p2c__scalar_imagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
const int8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
size_t c = channels;
const void* w = weights;
for (; c >= 2; c -= 2) {
int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10];
const int32_t vk5x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[11];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
const int32_t vi6x0 = (int32_t) i6[0];
const int32_t vi6x1 = (int32_t) i6[1];
i6 += 2;
const int32_t vk6x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12];
const int32_t vk6x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[13];
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
const int32_t vi7x0 = (int32_t) i7[0];
const int32_t vi7x1 = (int32_t) i7[1];
i7 += 2;
const int32_t vk7x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14];
const int32_t vk7x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[15];
vacc0 += vi7x0 * vk7x0;
vacc1 += vi7x1 * vk7x1;
const int32_t vi8x0 = (int32_t) i8[0];
const int32_t vi8x1 = (int32_t) i8[1];
i8 += 2;
const int32_t vk8x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[16];
const int32_t vk8x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[17];
vacc0 += vi8x0 * vk8x0;
vacc1 += vi8x1 * vk8x1;
w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 18 * sizeof(int8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
const float vscale0 = unaligned_indexed_load_f32(w, 0);
const float vscale1 = unaligned_indexed_load_f32(w, 1);
w = (const void*) ((const float*) w + 2);
vfpacc0 *= vscale0;
vfpacc1 *= vscale1;
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0);
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1);
vout0 = math_max_s32(vout0, vmagic_min);
vout1 = math_max_s32(vout1, vmagic_min);
vout0 = math_min_s32(vout0, vmagic_max);
vout1 = math_min_s32(vout1, vmagic_max);
vout0 -= vmagic_bias_less_zero_point;
vout1 -= vmagic_bias_less_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5;
const int32_t vk5 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6;
const int32_t vk6 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7;
const int32_t vk7 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14];
vacc += vi7 * vk7;
const int32_t vi8 = (int32_t) *i8;
const int32_t vk8 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[16];
vacc += vi8 * vk8;
const float vscale = unaligned_load_f32((const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 18 * sizeof(int8_t)));
float vfpacc = (float) vacc * vscale;
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc);
vout = math_max_s32(vout, vmagic_min);
vout = math_min_s32(vout, vmagic_max);
vout -= vmagic_bias_less_zero_point;
*output++ = (int8_t) vout;
}
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 9,136
| 34.414729
| 121
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-9p2c-minmax-fp32-scalar-lrintf.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_9p2c__scalar_lrintf(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
const int8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
size_t c = channels;
const void* w = weights;
for (; c >= 2; c -= 2) {
int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10];
const int32_t vk5x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[11];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
const int32_t vi6x0 = (int32_t) i6[0];
const int32_t vi6x1 = (int32_t) i6[1];
i6 += 2;
const int32_t vk6x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12];
const int32_t vk6x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[13];
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
const int32_t vi7x0 = (int32_t) i7[0];
const int32_t vi7x1 = (int32_t) i7[1];
i7 += 2;
const int32_t vk7x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14];
const int32_t vk7x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[15];
vacc0 += vi7x0 * vk7x0;
vacc1 += vi7x1 * vk7x1;
const int32_t vi8x0 = (int32_t) i8[0];
const int32_t vi8x1 = (int32_t) i8[1];
i8 += 2;
const int32_t vk8x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[16];
const int32_t vk8x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[17];
vacc0 += vi8x0 * vk8x0;
vacc1 += vi8x1 * vk8x1;
w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 18 * sizeof(int8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
const float vscale0 = unaligned_indexed_load_f32(w, 0);
const float vscale1 = unaligned_indexed_load_f32(w, 1);
w = (const void*) ((const float*) w + 2);
vfpacc0 *= vscale0;
vfpacc1 *= vscale1;
vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
const int32_t vrndacc0 = (int32_t) lrintf(vfpacc0);
const int32_t vrndacc1 = (int32_t) lrintf(vfpacc1);
int32_t vout0 = (int32_t) vrndacc0 + voutput_zero_point;
int32_t vout1 = (int32_t) vrndacc1 + voutput_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5;
const int32_t vk5 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6;
const int32_t vk6 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7;
const int32_t vk7 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14];
vacc += vi7 * vk7;
const int32_t vi8 = (int32_t) *i8;
const int32_t vk8 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[16];
vacc += vi8 * vk8;
const float vscale = unaligned_load_f32((const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 18 * sizeof(int8_t)));
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
const int32_t vrndacc = (int32_t) lrintf(vfpacc);
int32_t vout = vrndacc + voutput_zero_point;
*output++ = (int8_t) vout;
}
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 9,215
| 35.283465
| 121
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-9p2c-minmax-fp32-wasm-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_9p2c__wasm_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
const int8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
size_t c = channels;
const void* w = weights;
for (; c >= 2; c -= 2) {
int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10];
const int32_t vk5x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[11];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
const int32_t vi6x0 = (int32_t) i6[0];
const int32_t vi6x1 = (int32_t) i6[1];
i6 += 2;
const int32_t vk6x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12];
const int32_t vk6x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[13];
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
const int32_t vi7x0 = (int32_t) i7[0];
const int32_t vi7x1 = (int32_t) i7[1];
i7 += 2;
const int32_t vk7x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14];
const int32_t vk7x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[15];
vacc0 += vi7x0 * vk7x0;
vacc1 += vi7x1 * vk7x1;
const int32_t vi8x0 = (int32_t) i8[0];
const int32_t vi8x1 = (int32_t) i8[1];
i8 += 2;
const int32_t vk8x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[16];
const int32_t vk8x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[17];
vacc0 += vi8x0 * vk8x0;
vacc1 += vi8x1 * vk8x1;
w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 18 * sizeof(int8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
const float vscale0 = unaligned_indexed_load_f32(w, 0);
const float vscale1 = unaligned_indexed_load_f32(w, 1);
w = (const void*) ((const float*) w + 2);
vfpacc0 *= vscale0;
vfpacc1 *= vscale1;
vfpacc0 = __builtin_wasm_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = __builtin_wasm_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc0 = __builtin_wasm_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = __builtin_wasm_min_f32(vfpacc1, voutput_max_less_zero_point);
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5;
const int32_t vk5 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6;
const int32_t vk6 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7;
const int32_t vk7 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14];
vacc += vi7 * vk7;
const int32_t vi8 = (int32_t) *i8;
const int32_t vk8 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[16];
vacc += vi8 * vk8;
const float vscale = unaligned_load_f32((const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 18 * sizeof(int8_t)));
float vfpacc = (float) vacc * vscale;
vfpacc = __builtin_wasm_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = __builtin_wasm_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
}
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 9,377
| 35.92126
| 121
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-9p4c-minmax-fp32-scalar-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_9p4c__scalar_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
const int8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
size_t c = channels;
const void* w = weights;
for (; c >= 4; c -= 4) {
int32_t vacc0 = ((const int32_t*) w)[0];
int32_t vacc1 = ((const int32_t*) w)[1];
int32_t vacc2 = ((const int32_t*) w)[2];
int32_t vacc3 = ((const int32_t*) w)[3];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[1];
const int32_t vk0x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[2];
const int32_t vk0x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[3];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
const int32_t vi1x2 = (int32_t) i1[2];
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[4];
const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[5];
const int32_t vk1x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[6];
const int32_t vk1x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[7];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
const int32_t vi2x2 = (int32_t) i2[2];
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[8];
const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[9];
const int32_t vk2x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[10];
const int32_t vk2x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[11];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
const int32_t vi3x2 = (int32_t) i3[2];
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[12];
const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[13];
const int32_t vk3x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[14];
const int32_t vk3x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[15];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
const int32_t vi4x2 = (int32_t) i4[2];
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[16];
const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[17];
const int32_t vk4x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[18];
const int32_t vk4x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[19];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
const int32_t vi5x2 = (int32_t) i5[2];
const int32_t vi5x3 = (int32_t) i5[3];
i5 += 4;
const int32_t vk5x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[20];
const int32_t vk5x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[21];
const int32_t vk5x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[22];
const int32_t vk5x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[23];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
vacc2 += vi5x2 * vk5x2;
vacc3 += vi5x3 * vk5x3;
const int32_t vi6x0 = (int32_t) i6[0];
const int32_t vi6x1 = (int32_t) i6[1];
const int32_t vi6x2 = (int32_t) i6[2];
const int32_t vi6x3 = (int32_t) i6[3];
i6 += 4;
const int32_t vk6x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[24];
const int32_t vk6x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[25];
const int32_t vk6x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[26];
const int32_t vk6x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[27];
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
vacc2 += vi6x2 * vk6x2;
vacc3 += vi6x3 * vk6x3;
const int32_t vi7x0 = (int32_t) i7[0];
const int32_t vi7x1 = (int32_t) i7[1];
const int32_t vi7x2 = (int32_t) i7[2];
const int32_t vi7x3 = (int32_t) i7[3];
i7 += 4;
const int32_t vk7x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[28];
const int32_t vk7x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[29];
const int32_t vk7x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[30];
const int32_t vk7x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[31];
vacc0 += vi7x0 * vk7x0;
vacc1 += vi7x1 * vk7x1;
vacc2 += vi7x2 * vk7x2;
vacc3 += vi7x3 * vk7x3;
const int32_t vi8x0 = (int32_t) i8[0];
const int32_t vi8x1 = (int32_t) i8[1];
const int32_t vi8x2 = (int32_t) i8[2];
const int32_t vi8x3 = (int32_t) i8[3];
i8 += 4;
const int32_t vk8x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[32];
const int32_t vk8x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[33];
const int32_t vk8x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[34];
const int32_t vk8x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[35];
vacc0 += vi8x0 * vk8x0;
vacc1 += vi8x1 * vk8x1;
vacc2 += vi8x2 * vk8x2;
vacc3 += vi8x3 * vk8x3;
w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t) + 36 * sizeof(int8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
float vfpacc2 = (float) vacc2;
float vfpacc3 = (float) vacc3;
const float vscale0 = ((const float*) w)[0];
const float vscale1 = ((const float*) w)[1];
const float vscale2 = ((const float*) w)[2];
const float vscale3 = ((const float*) w)[3];
w = (const void*) ((const float*) w + 4);
vfpacc0 *= vscale0;
vfpacc1 *= vscale1;
vfpacc2 *= vscale2;
vfpacc3 *= vscale3;
vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc2 = math_max_f32(vfpacc2, voutput_min_less_zero_point);
vfpacc3 = math_max_f32(vfpacc3, voutput_min_less_zero_point);
vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
vfpacc2 = math_min_f32(vfpacc2, voutput_max_less_zero_point);
vfpacc3 = math_min_f32(vfpacc3, voutput_max_less_zero_point);
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
vfpacc2 += vmagic_bias;
vfpacc3 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
int32_t vout2 = (int32_t) float_as_uint32(vfpacc2) - vmagic_bias_less_output_zero_point;
int32_t vout3 = (int32_t) float_as_uint32(vfpacc3) - vmagic_bias_less_output_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output[2] = (int8_t) vout2;
output[3] = (int8_t) vout3;
output += 4;
}
if XNN_UNLIKELY(c != 0) {
const int8_t* k = (const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t));
do {
int32_t vacc = *((const int32_t*) w);
w = (const void*) ((uintptr_t) w + sizeof(int32_t));
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = (int32_t) k[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = (int32_t) k[4];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = (int32_t) k[8];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = (int32_t) k[12];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = (int32_t) k[16];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = (int32_t) k[20];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6++;
const int32_t vk6 = (int32_t) k[24];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7++;
const int32_t vk7 = (int32_t) k[28];
vacc += vi7 * vk7;
const int32_t vi8 = (int32_t) *i8++;
const int32_t vk8 = (int32_t) k[32];
vacc += vi8 * vk8;
k += 1;
const float vscale = *((const float*) ((uintptr_t) w + 3 * sizeof(int32_t) + 36 * sizeof(int8_t)));
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
}
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 13,073
| 38.618182
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-9p4c-minmax-fp32-scalar-imagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_9p4c__scalar_imagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
const int8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
size_t c = channels;
const void* w = weights;
for (; c >= 4; c -= 4) {
int32_t vacc0 = ((const int32_t*) w)[0];
int32_t vacc1 = ((const int32_t*) w)[1];
int32_t vacc2 = ((const int32_t*) w)[2];
int32_t vacc3 = ((const int32_t*) w)[3];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[1];
const int32_t vk0x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[2];
const int32_t vk0x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[3];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
const int32_t vi1x2 = (int32_t) i1[2];
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[4];
const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[5];
const int32_t vk1x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[6];
const int32_t vk1x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[7];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
const int32_t vi2x2 = (int32_t) i2[2];
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[8];
const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[9];
const int32_t vk2x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[10];
const int32_t vk2x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[11];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
const int32_t vi3x2 = (int32_t) i3[2];
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[12];
const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[13];
const int32_t vk3x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[14];
const int32_t vk3x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[15];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
const int32_t vi4x2 = (int32_t) i4[2];
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[16];
const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[17];
const int32_t vk4x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[18];
const int32_t vk4x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[19];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
const int32_t vi5x2 = (int32_t) i5[2];
const int32_t vi5x3 = (int32_t) i5[3];
i5 += 4;
const int32_t vk5x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[20];
const int32_t vk5x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[21];
const int32_t vk5x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[22];
const int32_t vk5x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[23];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
vacc2 += vi5x2 * vk5x2;
vacc3 += vi5x3 * vk5x3;
const int32_t vi6x0 = (int32_t) i6[0];
const int32_t vi6x1 = (int32_t) i6[1];
const int32_t vi6x2 = (int32_t) i6[2];
const int32_t vi6x3 = (int32_t) i6[3];
i6 += 4;
const int32_t vk6x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[24];
const int32_t vk6x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[25];
const int32_t vk6x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[26];
const int32_t vk6x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[27];
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
vacc2 += vi6x2 * vk6x2;
vacc3 += vi6x3 * vk6x3;
const int32_t vi7x0 = (int32_t) i7[0];
const int32_t vi7x1 = (int32_t) i7[1];
const int32_t vi7x2 = (int32_t) i7[2];
const int32_t vi7x3 = (int32_t) i7[3];
i7 += 4;
const int32_t vk7x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[28];
const int32_t vk7x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[29];
const int32_t vk7x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[30];
const int32_t vk7x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[31];
vacc0 += vi7x0 * vk7x0;
vacc1 += vi7x1 * vk7x1;
vacc2 += vi7x2 * vk7x2;
vacc3 += vi7x3 * vk7x3;
const int32_t vi8x0 = (int32_t) i8[0];
const int32_t vi8x1 = (int32_t) i8[1];
const int32_t vi8x2 = (int32_t) i8[2];
const int32_t vi8x3 = (int32_t) i8[3];
i8 += 4;
const int32_t vk8x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[32];
const int32_t vk8x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[33];
const int32_t vk8x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[34];
const int32_t vk8x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[35];
vacc0 += vi8x0 * vk8x0;
vacc1 += vi8x1 * vk8x1;
vacc2 += vi8x2 * vk8x2;
vacc3 += vi8x3 * vk8x3;
w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t) + 36 * sizeof(int8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
float vfpacc2 = (float) vacc2;
float vfpacc3 = (float) vacc3;
const float vscale0 = ((const float*) w)[0];
const float vscale1 = ((const float*) w)[1];
const float vscale2 = ((const float*) w)[2];
const float vscale3 = ((const float*) w)[3];
w = (const void*) ((const float*) w + 4);
vfpacc0 *= vscale0;
vfpacc1 *= vscale1;
vfpacc2 *= vscale2;
vfpacc3 *= vscale3;
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
vfpacc2 += vmagic_bias;
vfpacc3 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0);
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1);
int32_t vout2 = (int32_t) float_as_uint32(vfpacc2);
int32_t vout3 = (int32_t) float_as_uint32(vfpacc3);
vout0 = math_max_s32(vout0, vmagic_min);
vout1 = math_max_s32(vout1, vmagic_min);
vout2 = math_max_s32(vout2, vmagic_min);
vout3 = math_max_s32(vout3, vmagic_min);
vout0 = math_min_s32(vout0, vmagic_max);
vout1 = math_min_s32(vout1, vmagic_max);
vout2 = math_min_s32(vout2, vmagic_max);
vout3 = math_min_s32(vout3, vmagic_max);
vout0 -= vmagic_bias_less_zero_point;
vout1 -= vmagic_bias_less_zero_point;
vout2 -= vmagic_bias_less_zero_point;
vout3 -= vmagic_bias_less_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output[2] = (int8_t) vout2;
output[3] = (int8_t) vout3;
output += 4;
}
if XNN_UNLIKELY(c != 0) {
const int8_t* k = (const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t));
do {
int32_t vacc = *((const int32_t*) w);
w = (const void*) ((uintptr_t) w + sizeof(int32_t));
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = (int32_t) k[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = (int32_t) k[4];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = (int32_t) k[8];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = (int32_t) k[12];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = (int32_t) k[16];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = (int32_t) k[20];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6++;
const int32_t vk6 = (int32_t) k[24];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7++;
const int32_t vk7 = (int32_t) k[28];
vacc += vi7 * vk7;
const int32_t vi8 = (int32_t) *i8++;
const int32_t vk8 = (int32_t) k[32];
vacc += vi8 * vk8;
k += 1;
const float vscale = *((const float*) ((uintptr_t) w + 3 * sizeof(int32_t) + 36 * sizeof(int8_t)));
float vfpacc = (float) vacc * vscale;
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc);
vout = math_max_s32(vout, vmagic_min);
vout = math_min_s32(vout, vmagic_max);
vout -= vmagic_bias_less_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
}
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 12,822
| 37.16369
| 107
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-9p4c-minmax-fp32-scalar-lrintf.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_9p4c__scalar_lrintf(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
const int8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
size_t c = channels;
const void* w = weights;
for (; c >= 4; c -= 4) {
int32_t vacc0 = ((const int32_t*) w)[0];
int32_t vacc1 = ((const int32_t*) w)[1];
int32_t vacc2 = ((const int32_t*) w)[2];
int32_t vacc3 = ((const int32_t*) w)[3];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[1];
const int32_t vk0x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[2];
const int32_t vk0x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[3];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
const int32_t vi1x2 = (int32_t) i1[2];
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[4];
const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[5];
const int32_t vk1x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[6];
const int32_t vk1x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[7];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
const int32_t vi2x2 = (int32_t) i2[2];
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[8];
const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[9];
const int32_t vk2x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[10];
const int32_t vk2x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[11];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
const int32_t vi3x2 = (int32_t) i3[2];
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[12];
const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[13];
const int32_t vk3x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[14];
const int32_t vk3x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[15];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
const int32_t vi4x2 = (int32_t) i4[2];
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[16];
const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[17];
const int32_t vk4x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[18];
const int32_t vk4x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[19];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
const int32_t vi5x2 = (int32_t) i5[2];
const int32_t vi5x3 = (int32_t) i5[3];
i5 += 4;
const int32_t vk5x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[20];
const int32_t vk5x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[21];
const int32_t vk5x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[22];
const int32_t vk5x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[23];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
vacc2 += vi5x2 * vk5x2;
vacc3 += vi5x3 * vk5x3;
const int32_t vi6x0 = (int32_t) i6[0];
const int32_t vi6x1 = (int32_t) i6[1];
const int32_t vi6x2 = (int32_t) i6[2];
const int32_t vi6x3 = (int32_t) i6[3];
i6 += 4;
const int32_t vk6x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[24];
const int32_t vk6x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[25];
const int32_t vk6x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[26];
const int32_t vk6x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[27];
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
vacc2 += vi6x2 * vk6x2;
vacc3 += vi6x3 * vk6x3;
const int32_t vi7x0 = (int32_t) i7[0];
const int32_t vi7x1 = (int32_t) i7[1];
const int32_t vi7x2 = (int32_t) i7[2];
const int32_t vi7x3 = (int32_t) i7[3];
i7 += 4;
const int32_t vk7x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[28];
const int32_t vk7x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[29];
const int32_t vk7x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[30];
const int32_t vk7x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[31];
vacc0 += vi7x0 * vk7x0;
vacc1 += vi7x1 * vk7x1;
vacc2 += vi7x2 * vk7x2;
vacc3 += vi7x3 * vk7x3;
const int32_t vi8x0 = (int32_t) i8[0];
const int32_t vi8x1 = (int32_t) i8[1];
const int32_t vi8x2 = (int32_t) i8[2];
const int32_t vi8x3 = (int32_t) i8[3];
i8 += 4;
const int32_t vk8x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[32];
const int32_t vk8x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[33];
const int32_t vk8x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[34];
const int32_t vk8x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[35];
vacc0 += vi8x0 * vk8x0;
vacc1 += vi8x1 * vk8x1;
vacc2 += vi8x2 * vk8x2;
vacc3 += vi8x3 * vk8x3;
w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t) + 36 * sizeof(int8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
float vfpacc2 = (float) vacc2;
float vfpacc3 = (float) vacc3;
const float vscale0 = ((const float*) w)[0];
const float vscale1 = ((const float*) w)[1];
const float vscale2 = ((const float*) w)[2];
const float vscale3 = ((const float*) w)[3];
w = (const void*) ((const float*) w + 4);
vfpacc0 *= vscale0;
vfpacc1 *= vscale1;
vfpacc2 *= vscale2;
vfpacc3 *= vscale3;
vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc2 = math_max_f32(vfpacc2, voutput_min_less_zero_point);
vfpacc3 = math_max_f32(vfpacc3, voutput_min_less_zero_point);
vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
vfpacc2 = math_min_f32(vfpacc2, voutput_max_less_zero_point);
vfpacc3 = math_min_f32(vfpacc3, voutput_max_less_zero_point);
const int32_t vrndacc0 = (int32_t) lrintf(vfpacc0);
const int32_t vrndacc1 = (int32_t) lrintf(vfpacc1);
const int32_t vrndacc2 = (int32_t) lrintf(vfpacc2);
const int32_t vrndacc3 = (int32_t) lrintf(vfpacc3);
int32_t vout0 = (int32_t) vrndacc0 + voutput_zero_point;
int32_t vout1 = (int32_t) vrndacc1 + voutput_zero_point;
int32_t vout2 = (int32_t) vrndacc2 + voutput_zero_point;
int32_t vout3 = (int32_t) vrndacc3 + voutput_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output[2] = (int8_t) vout2;
output[3] = (int8_t) vout3;
output += 4;
}
if XNN_UNLIKELY(c != 0) {
const int8_t* k = (const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t));
do {
int32_t vacc = *((const int32_t*) w);
w = (const void*) ((uintptr_t) w + sizeof(int32_t));
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = (int32_t) k[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = (int32_t) k[4];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = (int32_t) k[8];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = (int32_t) k[12];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = (int32_t) k[16];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = (int32_t) k[20];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6++;
const int32_t vk6 = (int32_t) k[24];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7++;
const int32_t vk7 = (int32_t) k[28];
vacc += vi7 * vk7;
const int32_t vi8 = (int32_t) *i8++;
const int32_t vk8 = (int32_t) k[32];
vacc += vi8 * vk8;
k += 1;
const float vscale = *((const float*) ((uintptr_t) w + 3 * sizeof(int32_t) + 36 * sizeof(int8_t)));
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
const int32_t vrndacc = (int32_t) lrintf(vfpacc);
int32_t vout = vrndacc + voutput_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
}
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 12,961
| 38.278788
| 107
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-9p4c-minmax-fp32-wasm-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_9p4c__wasm_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
const int8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
size_t c = channels;
const void* w = weights;
for (; c >= 4; c -= 4) {
int32_t vacc0 = ((const int32_t*) w)[0];
int32_t vacc1 = ((const int32_t*) w)[1];
int32_t vacc2 = ((const int32_t*) w)[2];
int32_t vacc3 = ((const int32_t*) w)[3];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[1];
const int32_t vk0x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[2];
const int32_t vk0x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[3];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
const int32_t vi1x2 = (int32_t) i1[2];
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[4];
const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[5];
const int32_t vk1x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[6];
const int32_t vk1x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[7];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
const int32_t vi2x2 = (int32_t) i2[2];
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[8];
const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[9];
const int32_t vk2x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[10];
const int32_t vk2x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[11];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
const int32_t vi3x2 = (int32_t) i3[2];
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[12];
const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[13];
const int32_t vk3x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[14];
const int32_t vk3x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[15];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
const int32_t vi4x2 = (int32_t) i4[2];
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[16];
const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[17];
const int32_t vk4x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[18];
const int32_t vk4x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[19];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
const int32_t vi5x2 = (int32_t) i5[2];
const int32_t vi5x3 = (int32_t) i5[3];
i5 += 4;
const int32_t vk5x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[20];
const int32_t vk5x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[21];
const int32_t vk5x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[22];
const int32_t vk5x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[23];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
vacc2 += vi5x2 * vk5x2;
vacc3 += vi5x3 * vk5x3;
const int32_t vi6x0 = (int32_t) i6[0];
const int32_t vi6x1 = (int32_t) i6[1];
const int32_t vi6x2 = (int32_t) i6[2];
const int32_t vi6x3 = (int32_t) i6[3];
i6 += 4;
const int32_t vk6x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[24];
const int32_t vk6x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[25];
const int32_t vk6x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[26];
const int32_t vk6x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[27];
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
vacc2 += vi6x2 * vk6x2;
vacc3 += vi6x3 * vk6x3;
const int32_t vi7x0 = (int32_t) i7[0];
const int32_t vi7x1 = (int32_t) i7[1];
const int32_t vi7x2 = (int32_t) i7[2];
const int32_t vi7x3 = (int32_t) i7[3];
i7 += 4;
const int32_t vk7x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[28];
const int32_t vk7x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[29];
const int32_t vk7x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[30];
const int32_t vk7x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[31];
vacc0 += vi7x0 * vk7x0;
vacc1 += vi7x1 * vk7x1;
vacc2 += vi7x2 * vk7x2;
vacc3 += vi7x3 * vk7x3;
const int32_t vi8x0 = (int32_t) i8[0];
const int32_t vi8x1 = (int32_t) i8[1];
const int32_t vi8x2 = (int32_t) i8[2];
const int32_t vi8x3 = (int32_t) i8[3];
i8 += 4;
const int32_t vk8x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[32];
const int32_t vk8x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[33];
const int32_t vk8x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[34];
const int32_t vk8x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[35];
vacc0 += vi8x0 * vk8x0;
vacc1 += vi8x1 * vk8x1;
vacc2 += vi8x2 * vk8x2;
vacc3 += vi8x3 * vk8x3;
w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t) + 36 * sizeof(int8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
float vfpacc2 = (float) vacc2;
float vfpacc3 = (float) vacc3;
const float vscale0 = ((const float*) w)[0];
const float vscale1 = ((const float*) w)[1];
const float vscale2 = ((const float*) w)[2];
const float vscale3 = ((const float*) w)[3];
w = (const void*) ((const float*) w + 4);
vfpacc0 *= vscale0;
vfpacc1 *= vscale1;
vfpacc2 *= vscale2;
vfpacc3 *= vscale3;
vfpacc0 = __builtin_wasm_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = __builtin_wasm_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc2 = __builtin_wasm_max_f32(vfpacc2, voutput_min_less_zero_point);
vfpacc3 = __builtin_wasm_max_f32(vfpacc3, voutput_min_less_zero_point);
vfpacc0 = __builtin_wasm_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = __builtin_wasm_min_f32(vfpacc1, voutput_max_less_zero_point);
vfpacc2 = __builtin_wasm_min_f32(vfpacc2, voutput_max_less_zero_point);
vfpacc3 = __builtin_wasm_min_f32(vfpacc3, voutput_max_less_zero_point);
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
vfpacc2 += vmagic_bias;
vfpacc3 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
int32_t vout2 = (int32_t) float_as_uint32(vfpacc2) - vmagic_bias_less_output_zero_point;
int32_t vout3 = (int32_t) float_as_uint32(vfpacc3) - vmagic_bias_less_output_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output[2] = (int8_t) vout2;
output[3] = (int8_t) vout3;
output += 4;
}
if XNN_UNLIKELY(c != 0) {
const int8_t* k = (const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t));
do {
int32_t vacc = *((const int32_t*) w);
w = (const void*) ((uintptr_t) w + sizeof(int32_t));
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = (int32_t) k[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = (int32_t) k[4];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = (int32_t) k[8];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = (int32_t) k[12];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = (int32_t) k[16];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = (int32_t) k[20];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6++;
const int32_t vk6 = (int32_t) k[24];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7++;
const int32_t vk7 = (int32_t) k[28];
vacc += vi7 * vk7;
const int32_t vi8 = (int32_t) *i8++;
const int32_t vk8 = (int32_t) k[32];
vacc += vi8 * vk8;
k += 1;
const float vscale = *((const float*) ((uintptr_t) w + 3 * sizeof(int32_t) + 36 * sizeof(int8_t)));
float vfpacc = (float) vacc * vscale;
vfpacc = __builtin_wasm_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = __builtin_wasm_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
}
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 13,171
| 38.915152
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-1x16-minmax-fp32-neon-mlal-lane-prfm.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/neon-mlal-lane.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/gemm.h>
#include <xnnpack/prefetch.h>
void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x16__neon_mlal_lane_prfm(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const int8x8_t va0 = vld1_s8(a0); a0 += 8;
const int16x8_t vxa0 = vmovl_s8(va0);
const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
const int8x8_t vb89ABCDEFc0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc0 = vmovl_s8(vb89ABCDEFc0);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
const int8x8_t vb89ABCDEFc1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc1 = vmovl_s8(vb89ABCDEFc1);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
const int8x8_t vb89ABCDEFc2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc2 = vmovl_s8(vb89ABCDEFc2);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
const int8x8_t vb89ABCDEFc3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc3 = vmovl_s8(vb89ABCDEFc3);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
xnn_prefetch_to_l1((const int8_t*) w + 448);
xnn_prefetch_to_l1((const int8_t*) w + 512);
const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
const int8x8_t vb89ABCDEFc4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc4 = vmovl_s8(vb89ABCDEFc4);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
const int8x8_t vb89ABCDEFc5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc5 = vmovl_s8(vb89ABCDEFc5);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
const int8x8_t vb89ABCDEFc6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc6 = vmovl_s8(vb89ABCDEFc6);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
const int8x8_t vb01234567c7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c7 = vmovl_s8(vb01234567c7);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
const int8x8_t vb89ABCDEFc7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc7 = vmovl_s8(vb89ABCDEFc7);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc7), vget_high_s16(vxa0), 3);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc7), vget_high_s16(vxa0), 3);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int16x8_t vxa0 = vmovl_s8(va0);
const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);
const int8x8_t vb89ABCDEFc0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc0 = vmovl_s8(vb89ABCDEFc0);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
if (k >= 2 * sizeof(int8_t)) {
const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);
const int8x8_t vb89ABCDEFc1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc1 = vmovl_s8(vb89ABCDEFc1);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);
const int8x8_t vb89ABCDEFc2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc2 = vmovl_s8(vb89ABCDEFc2);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
if (k >= 4 * sizeof(int8_t)) {
const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);
const int8x8_t vb89ABCDEFc3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc3 = vmovl_s8(vb89ABCDEFc3);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);
const int8x8_t vb89ABCDEFc4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc4 = vmovl_s8(vb89ABCDEFc4);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
if (k >= 6 * sizeof(int8_t)) {
const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);
const int8x8_t vb89ABCDEFc5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc5 = vmovl_s8(vb89ABCDEFc5);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
if (k > 6 * sizeof(int8_t)) {
const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);
const int8x8_t vb89ABCDEFc6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc6 = vmovl_s8(vb89ABCDEFc6);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
}
}
}
}
}
}
}
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
float32x4_t vfpacc0x89AB = vcvtq_f32_s32(vacc0x89AB);
float32x4_t vfpacc0xCDEF = vcvtq_f32_s32(vacc0xCDEF);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
const float32x4_t vscale89AB = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x89AB = vmulq_f32(vfpacc0x89AB, vscale89AB);
const float32x4_t vscaleCDEF = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0xCDEF = vmulq_f32(vfpacc0xCDEF, vscaleCDEF);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->fp32_neon.magic_bias);
vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
vacc0x89AB = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x89AB, vmagic_bias));
vacc0xCDEF = vreinterpretq_s32_f32(vaddq_f32(vfpacc0xCDEF, vmagic_bias));
const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(¶ms->fp32_neon.magic_bias_less_output_zero_point);
vacc0x0123 = vqsubq_s32(vacc0x0123, vmagic_bias_less_output_zero_point);
vacc0x4567 = vqsubq_s32(vacc0x4567, vmagic_bias_less_output_zero_point);
vacc0x89AB = vqsubq_s32(vacc0x89AB, vmagic_bias_less_output_zero_point);
vacc0xCDEF = vqsubq_s32(vacc0xCDEF, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->fp32_neon.output_min);
vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->fp32_neon.output_max);
vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
if (nc >= 16) {
vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
if (nc & 8) {
vst1_s8(c0, vout0x01234567); c0 += 8;
vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
}
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 16,589
| 53.572368
| 125
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-1x16-minmax-fp32-neon-mlal-lane.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/neon-mlal-lane.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/gemm.h>
void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x16__neon_mlal_lane(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const int8x8_t va0 = vld1_s8(a0); a0 += 8;
const int16x8_t vxa0 = vmovl_s8(va0);
const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
const int8x8_t vb89ABCDEFc0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc0 = vmovl_s8(vb89ABCDEFc0);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
const int8x8_t vb89ABCDEFc1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc1 = vmovl_s8(vb89ABCDEFc1);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
const int8x8_t vb89ABCDEFc2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc2 = vmovl_s8(vb89ABCDEFc2);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
const int8x8_t vb89ABCDEFc3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc3 = vmovl_s8(vb89ABCDEFc3);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
const int8x8_t vb89ABCDEFc4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc4 = vmovl_s8(vb89ABCDEFc4);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
const int8x8_t vb89ABCDEFc5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc5 = vmovl_s8(vb89ABCDEFc5);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
const int8x8_t vb89ABCDEFc6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc6 = vmovl_s8(vb89ABCDEFc6);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
const int8x8_t vb01234567c7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c7 = vmovl_s8(vb01234567c7);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
const int8x8_t vb89ABCDEFc7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc7 = vmovl_s8(vb89ABCDEFc7);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc7), vget_high_s16(vxa0), 3);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc7), vget_high_s16(vxa0), 3);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int16x8_t vxa0 = vmovl_s8(va0);
const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);
const int8x8_t vb89ABCDEFc0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc0 = vmovl_s8(vb89ABCDEFc0);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
if (k >= 2 * sizeof(int8_t)) {
const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);
const int8x8_t vb89ABCDEFc1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc1 = vmovl_s8(vb89ABCDEFc1);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);
const int8x8_t vb89ABCDEFc2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc2 = vmovl_s8(vb89ABCDEFc2);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
if (k >= 4 * sizeof(int8_t)) {
const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);
const int8x8_t vb89ABCDEFc3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc3 = vmovl_s8(vb89ABCDEFc3);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);
const int8x8_t vb89ABCDEFc4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc4 = vmovl_s8(vb89ABCDEFc4);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
if (k >= 6 * sizeof(int8_t)) {
const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);
const int8x8_t vb89ABCDEFc5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc5 = vmovl_s8(vb89ABCDEFc5);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
if (k > 6 * sizeof(int8_t)) {
const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);
const int8x8_t vb89ABCDEFc6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc6 = vmovl_s8(vb89ABCDEFc6);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
}
}
}
}
}
}
}
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
float32x4_t vfpacc0x89AB = vcvtq_f32_s32(vacc0x89AB);
float32x4_t vfpacc0xCDEF = vcvtq_f32_s32(vacc0xCDEF);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
const float32x4_t vscale89AB = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x89AB = vmulq_f32(vfpacc0x89AB, vscale89AB);
const float32x4_t vscaleCDEF = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0xCDEF = vmulq_f32(vfpacc0xCDEF, vscaleCDEF);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->fp32_neon.magic_bias);
vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
vacc0x89AB = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x89AB, vmagic_bias));
vacc0xCDEF = vreinterpretq_s32_f32(vaddq_f32(vfpacc0xCDEF, vmagic_bias));
const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(¶ms->fp32_neon.magic_bias_less_output_zero_point);
vacc0x0123 = vqsubq_s32(vacc0x0123, vmagic_bias_less_output_zero_point);
vacc0x4567 = vqsubq_s32(vacc0x4567, vmagic_bias_less_output_zero_point);
vacc0x89AB = vqsubq_s32(vacc0x89AB, vmagic_bias_less_output_zero_point);
vacc0xCDEF = vqsubq_s32(vacc0xCDEF, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->fp32_neon.output_min);
vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->fp32_neon.output_max);
vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
if (nc >= 16) {
vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
if (nc & 8) {
vst1_s8(c0, vout0x01234567); c0 += 8;
vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
}
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 16,452
| 53.66113
| 125
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-1x16-minmax-fp32-neonv8-mlal-lane-prfm.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/neon-mlal-lane.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/prefetch.h>
void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x16__neonv8_mlal_lane_prfm(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const int8x8_t va0 = vld1_s8(a0); a0 += 8;
const int16x8_t vxa0 = vmovl_s8(va0);
const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
const int8x8_t vb89ABCDEFc0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc0 = vmovl_s8(vb89ABCDEFc0);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
const int8x8_t vb89ABCDEFc1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc1 = vmovl_s8(vb89ABCDEFc1);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
const int8x8_t vb89ABCDEFc2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc2 = vmovl_s8(vb89ABCDEFc2);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
const int8x8_t vb89ABCDEFc3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc3 = vmovl_s8(vb89ABCDEFc3);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
xnn_prefetch_to_l1((const int8_t*) w + 448);
xnn_prefetch_to_l1((const int8_t*) w + 512);
const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
const int8x8_t vb89ABCDEFc4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc4 = vmovl_s8(vb89ABCDEFc4);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
const int8x8_t vb89ABCDEFc5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc5 = vmovl_s8(vb89ABCDEFc5);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
const int8x8_t vb89ABCDEFc6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc6 = vmovl_s8(vb89ABCDEFc6);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
const int8x8_t vb01234567c7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c7 = vmovl_s8(vb01234567c7);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
const int8x8_t vb89ABCDEFc7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc7 = vmovl_s8(vb89ABCDEFc7);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc7), vget_high_s16(vxa0), 3);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc7), vget_high_s16(vxa0), 3);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int16x8_t vxa0 = vmovl_s8(va0);
const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);
const int8x8_t vb89ABCDEFc0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc0 = vmovl_s8(vb89ABCDEFc0);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
if (k >= 2 * sizeof(int8_t)) {
const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);
const int8x8_t vb89ABCDEFc1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc1 = vmovl_s8(vb89ABCDEFc1);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);
const int8x8_t vb89ABCDEFc2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc2 = vmovl_s8(vb89ABCDEFc2);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
if (k >= 4 * sizeof(int8_t)) {
const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);
const int8x8_t vb89ABCDEFc3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc3 = vmovl_s8(vb89ABCDEFc3);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);
const int8x8_t vb89ABCDEFc4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc4 = vmovl_s8(vb89ABCDEFc4);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
if (k >= 6 * sizeof(int8_t)) {
const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);
const int8x8_t vb89ABCDEFc5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc5 = vmovl_s8(vb89ABCDEFc5);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
if (k > 6 * sizeof(int8_t)) {
const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);
const int8x8_t vb89ABCDEFc6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc6 = vmovl_s8(vb89ABCDEFc6);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
}
}
}
}
}
}
}
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
float32x4_t vfpacc0x89AB = vcvtq_f32_s32(vacc0x89AB);
float32x4_t vfpacc0xCDEF = vcvtq_f32_s32(vacc0xCDEF);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
const float32x4_t vscale89AB = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x89AB = vmulq_f32(vfpacc0x89AB, vscale89AB);
const float32x4_t vscaleCDEF = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0xCDEF = vmulq_f32(vfpacc0xCDEF, vscaleCDEF);
vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
vacc0x89AB = vcvtnq_s32_f32(vfpacc0x89AB);
vacc0xCDEF = vcvtnq_s32_f32(vfpacc0xCDEF);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->fp32_neonv8.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->fp32_neonv8.output_min);
vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->fp32_neonv8.output_max);
vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
if (nc >= 16) {
vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
if (nc & 8) {
vst1_s8(c0, vout0x01234567); c0 += 8;
vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
}
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 16,367
| 53.019802
| 112
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-1x16-minmax-fp32-neonv8-mlal-lane.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/neon-mlal-lane.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x16__neonv8_mlal_lane(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const int8x8_t va0 = vld1_s8(a0); a0 += 8;
const int16x8_t vxa0 = vmovl_s8(va0);
const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
const int8x8_t vb89ABCDEFc0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc0 = vmovl_s8(vb89ABCDEFc0);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
const int8x8_t vb89ABCDEFc1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc1 = vmovl_s8(vb89ABCDEFc1);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
const int8x8_t vb89ABCDEFc2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc2 = vmovl_s8(vb89ABCDEFc2);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
const int8x8_t vb89ABCDEFc3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc3 = vmovl_s8(vb89ABCDEFc3);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
const int8x8_t vb89ABCDEFc4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc4 = vmovl_s8(vb89ABCDEFc4);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
const int8x8_t vb89ABCDEFc5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc5 = vmovl_s8(vb89ABCDEFc5);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
const int8x8_t vb89ABCDEFc6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc6 = vmovl_s8(vb89ABCDEFc6);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
const int8x8_t vb01234567c7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c7 = vmovl_s8(vb01234567c7);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
const int8x8_t vb89ABCDEFc7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc7 = vmovl_s8(vb89ABCDEFc7);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc7), vget_high_s16(vxa0), 3);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc7), vget_high_s16(vxa0), 3);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int16x8_t vxa0 = vmovl_s8(va0);
const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);
const int8x8_t vb89ABCDEFc0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc0 = vmovl_s8(vb89ABCDEFc0);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
if (k >= 2 * sizeof(int8_t)) {
const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);
const int8x8_t vb89ABCDEFc1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc1 = vmovl_s8(vb89ABCDEFc1);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);
const int8x8_t vb89ABCDEFc2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc2 = vmovl_s8(vb89ABCDEFc2);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
if (k >= 4 * sizeof(int8_t)) {
const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);
const int8x8_t vb89ABCDEFc3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc3 = vmovl_s8(vb89ABCDEFc3);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);
const int8x8_t vb89ABCDEFc4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc4 = vmovl_s8(vb89ABCDEFc4);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
if (k >= 6 * sizeof(int8_t)) {
const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);
const int8x8_t vb89ABCDEFc5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc5 = vmovl_s8(vb89ABCDEFc5);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
if (k > 6 * sizeof(int8_t)) {
const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);
const int8x8_t vb89ABCDEFc6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc6 = vmovl_s8(vb89ABCDEFc6);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
}
}
}
}
}
}
}
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
float32x4_t vfpacc0x89AB = vcvtq_f32_s32(vacc0x89AB);
float32x4_t vfpacc0xCDEF = vcvtq_f32_s32(vacc0xCDEF);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
const float32x4_t vscale89AB = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x89AB = vmulq_f32(vfpacc0x89AB, vscale89AB);
const float32x4_t vscaleCDEF = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0xCDEF = vmulq_f32(vfpacc0xCDEF, vscaleCDEF);
vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
vacc0x89AB = vcvtnq_s32_f32(vfpacc0x89AB);
vacc0xCDEF = vcvtnq_s32_f32(vfpacc0xCDEF);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->fp32_neonv8.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->fp32_neonv8.output_min);
vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->fp32_neonv8.output_max);
vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
if (nc >= 16) {
vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
if (nc & 8) {
vst1_s8(c0, vout0x01234567); c0 += 8;
vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
}
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 16,230
| 53.103333
| 112
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-1x16c4-minmax-fp32-neondot.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c4-neondot.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x16c4__neondot(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 4 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
// Loop over groups of 16 columns.
do {
// Initialize accumulators with bias. 16 bias values are loaded from the
// weight matrix, at the start of the group of 16 columns.
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc0x89AB = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const int32_t*) w + 4;
// Inner accumulation loop along the 16 columns.
size_t k = kc;
// 2x partial unrolled loop to load 8 bytes at a time.
while (k >= 8 * sizeof(int8_t)) {
// Load a 1x8 block of activations.
const int8x8_t va0x01234567 = vld1_s8(a0); a0 += 8;
// Load a 8x16 block of weights.
const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb0123x89AB = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb0123xCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb4567x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb4567x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb4567x89AB = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb4567xCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
// Multiply-accumulate: 1x8 * 8x16 --> 1x16.
vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0);
vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0);
vacc0x89AB = vdotq_lane_s32(vacc0x89AB, vb0123x89AB, va0x01234567, 0);
vacc0xCDEF = vdotq_lane_s32(vacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb4567x0123, va0x01234567, 1);
vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb4567x4567, va0x01234567, 1);
vacc0x89AB = vdotq_lane_s32(vacc0x89AB, vb4567x89AB, va0x01234567, 1);
vacc0xCDEF = vdotq_lane_s32(vacc0xCDEF, vb4567xCDEF, va0x01234567, 1);
k -= 8 * sizeof(int8_t);
}
// Handle up to 4 final positions of `k`
if XNN_UNLIKELY(k != 0) {
// Load a 1x4 block of activations.
const int8x8_t va0x01234567 = vld1_s8(a0); a0 += 4;
// Load a 4x16 block of weights.
const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb0123x89AB = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb0123xCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
// Multiply-accumulate: 1x4 * 4x16 --> 1x16.
vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0);
vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0);
vacc0x89AB = vdotq_lane_s32(vacc0x89AB, vb0123x89AB, va0x01234567, 0);
vacc0xCDEF = vdotq_lane_s32(vacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
}
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
float32x4_t vfpacc0x89AB = vcvtq_f32_s32(vacc0x89AB);
float32x4_t vfpacc0xCDEF = vcvtq_f32_s32(vacc0xCDEF);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const float*) w + 4;
vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const float*) w + 4;
vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
const float32x4_t vscale89AB = vld1q_f32((const float*) w); w = (const float*) w + 4;
vfpacc0x89AB = vmulq_f32(vfpacc0x89AB, vscale89AB);
const float32x4_t vscaleCDEF = vld1q_f32((const float*) w); w = (const float*) w + 4;
vfpacc0xCDEF = vmulq_f32(vfpacc0xCDEF, vscaleCDEF);
vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
vacc0x89AB = vcvtnq_s32_f32(vfpacc0x89AB);
vacc0xCDEF = vcvtnq_s32_f32(vfpacc0xCDEF);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->fp32_neonv8.output_zero_point);
#if XNN_ARCH_ARM64
const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->fp32_neonv8.output_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->fp32_neonv8.output_max);
vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
if (nc >= 16) {
// Main case where there the 16 columns fit in the destination.
vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
// Advance to the next 16 columns.
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
// Final case where not all of the 16 columns fit in the destination.
int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
if (nc & 8) {
vst1_s8(c0, vout0x01234567); c0 += 8;
vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
}
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,281
| 41.584795
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-1x1c4-minmax-fp32-armsimd32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c4-armsimd32.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_acle.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x1c4__armsimd32(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
kc = round_up_po2(kc, 4 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
const float vmagic_bias = params->fp32_armsimd32.magic_bias;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
w = (const void*) ((const int32_t*) w + 1);
size_t k = kc;
do {
const int8x4_t va0 = (int8x4_t) unaligned_load_s32(a0); a0 += 4;
const int16x2_t va0c02 = __sxtb16(va0);
const int16x2_t va0c13 = __sxtb16(__ror(va0, 8));
const int8x4_t vb0 = *((const int8x4_t*) w); w = (const int8_t*) w + 4;
const int16x2_t vb0c02 = __sxtb16(vb0);
vacc0x0 = __smlad(va0c02, vb0c02, vacc0x0);
const int16x2_t vb0c13 = __sxtb16(__ror(vb0, 8));
vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);
k -= 4 * sizeof(int8_t);
} while (k != 0);
float vfpacc0x0 = (float) vacc0x0;
const float vscale0 = ((const float*) w)[0];
vfpacc0x0 *= vscale0;
w = (const void*) ((const float*) w + 1);
vfpacc0x0 += vmagic_bias;
int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0);
const int32_t vmagic_bias_less_zero_point = params->fp32_armsimd32.magic_bias_less_zero_point;
vout0x0 = __qsub(vout0x0, vmagic_bias_less_zero_point);
vout0x0 = __ssat(vout0x0, 8);
const uint32_t vout0 = (uint32_t) vout0x0;
uint32_t vout = vout0;
const int8x4_t voutput_min = (int8x4_t) params->fp32_armsimd32.output_min;
__ssub8((int8x4_t) vout, voutput_min);
vout = (uint32_t) __sel((uint8x4_t) vout, (uint8x4_t) voutput_min);
const int8x4_t voutput_max = (int8x4_t) params->fp32_armsimd32.output_max;
__ssub8((int8x4_t) vout, voutput_max);
vout = (uint32_t) __sel((uint8x4_t) voutput_max, (uint8x4_t) vout);
*c0 = (int8_t) vout;
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
nc -= 1;
} while (nc != 0);
}
| 2,737
| 26.38
| 98
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-1x2-minmax-fp32-scalar-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x2__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32_t vacc0x0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc0x1 = unaligned_indexed_load_s32(w, 1);
w = (const int32_t*) w + 2;
size_t k = kc;
do {
const int32_t va0 = (int32_t) *a0++;
const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
w = (const int8_t*) w + 2;
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
k -= sizeof(int8_t);
} while (k != 0);
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
const float vscale0 = unaligned_indexed_load_f32(w, 0);
vfpacc0x0 *= vscale0;
const float vscale1 = unaligned_indexed_load_f32(w, 1);
vfpacc0x1 *= vscale1;
w = (const void*) ((const float*) w + 2);
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1) - vmagic_bias_less_output_zero_point;
if XNN_LIKELY(nc >= 2) {
c0[0] = (int8_t) vout0x0;
c0[1] = (int8_t) vout0x1;
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
nc -= 2;
} else {
if (nc & 1) {
c0[0] = (int8_t) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
| 2,882
| 28.121212
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-1x2-minmax-fp32-scalar-imagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x2__scalar_imagic(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32_t vacc0x0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc0x1 = unaligned_indexed_load_s32(w, 1);
w = (const int32_t*) w + 2;
size_t k = kc;
do {
const int32_t va0 = (int32_t) *a0++;
const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
w = (const int8_t*) w + 2;
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
k -= sizeof(int8_t);
} while (k != 0);
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
const float vscale0 = unaligned_indexed_load_f32(w, 0);
vfpacc0x0 *= vscale0;
const float vscale1 = unaligned_indexed_load_f32(w, 1);
vfpacc0x1 *= vscale1;
w = (const void*) ((const float*) w + 2);
const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0);
int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1);
const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
vout0x0 = math_max_s32(vout0x0, vmagic_min);
vout0x1 = math_max_s32(vout0x1, vmagic_min);
const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
vout0x0 = math_min_s32(vout0x0, vmagic_max);
vout0x1 = math_min_s32(vout0x1, vmagic_max);
const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
vout0x0 -= vmagic_bias_less_zero_point;
vout0x1 -= vmagic_bias_less_zero_point;
if XNN_LIKELY(nc >= 2) {
c0[0] = (int8_t) vout0x0;
c0[1] = (int8_t) vout0x1;
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
nc -= 2;
} else {
if (nc & 1) {
c0[0] = (int8_t) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
| 2,735
| 25.823529
| 102
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-1x2-minmax-fp32-scalar-lrintf.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x2__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32_t vacc0x0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc0x1 = unaligned_indexed_load_s32(w, 1);
w = (const int32_t*) w + 2;
size_t k = kc;
do {
const int32_t va0 = (int32_t) *a0++;
const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
w = (const int8_t*) w + 2;
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
k -= sizeof(int8_t);
} while (k != 0);
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
const float vscale0 = unaligned_indexed_load_f32(w, 0);
vfpacc0x0 *= vscale0;
const float vscale1 = unaligned_indexed_load_f32(w, 1);
vfpacc0x1 *= vscale1;
w = (const void*) ((const float*) w + 2);
const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
if XNN_LIKELY(nc >= 2) {
c0[0] = (int8_t) vout0x0;
c0[1] = (int8_t) vout0x1;
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
nc -= 2;
} else {
if (nc & 1) {
c0[0] = (int8_t) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
| 2,775
| 27.040404
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-1x2-minmax-fp32-wasm-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x2__wasm_fmagic(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32_t vacc0x0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc0x1 = unaligned_indexed_load_s32(w, 1);
w = (const int32_t*) w + 2;
size_t k = kc;
do {
const int32_t va0 = (int32_t) *a0++;
const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
w = (const int8_t*) w + 2;
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
k -= sizeof(int8_t);
} while (k != 0);
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
const float vscale0 = unaligned_indexed_load_f32(w, 0);
vfpacc0x0 *= vscale0;
const float vscale1 = unaligned_indexed_load_f32(w, 1);
vfpacc0x1 *= vscale1;
w = (const void*) ((const float*) w + 2);
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = __builtin_wasm_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = __builtin_wasm_max_f32(vfpacc0x1, voutput_min_less_zero_point);
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = __builtin_wasm_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = __builtin_wasm_min_f32(vfpacc0x1, voutput_max_less_zero_point);
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1) - vmagic_bias_less_output_zero_point;
if XNN_LIKELY(nc >= 2) {
c0[0] = (int8_t) vout0x0;
c0[1] = (int8_t) vout0x1;
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
nc -= 2;
} else {
if (nc & 1) {
c0[0] = (int8_t) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
| 2,920
| 28.505051
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-1x2c4-minmax-fp32-armsimd32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c4-armsimd32.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_acle.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x2c4__armsimd32(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
kc = round_up_po2(kc, 4 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
const float vmagic_bias = params->fp32_armsimd32.magic_bias;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
w = (const void*) ((const int32_t*) w + 2);
size_t k = kc;
do {
const int8x4_t va0 = (int8x4_t) unaligned_load_s32(a0); a0 += 4;
const int16x2_t va0c02 = __sxtb16(va0);
const int16x2_t va0c13 = __sxtb16(__ror(va0, 8));
const int8x4_t vb0 = *((const int8x4_t*) w); w = (const int8_t*) w + 4;
const int16x2_t vb0c02 = __sxtb16(vb0);
vacc0x0 = __smlad(va0c02, vb0c02, vacc0x0);
const int16x2_t vb0c13 = __sxtb16(__ror(vb0, 8));
vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);
const int8x4_t vb1 = *((const int8x4_t*) w); w = (const int8_t*) w + 4;
const int16x2_t vb1c02 = __sxtb16(vb1);
vacc0x1 = __smlad(va0c02, vb1c02, vacc0x1);
const int16x2_t vb1c13 = __sxtb16(__ror(vb1, 8));
vacc0x1 = __smlad(va0c13, vb1c13, vacc0x1);
k -= 4 * sizeof(int8_t);
} while (k != 0);
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
const float vscale0 = ((const float*) w)[0];
vfpacc0x0 *= vscale0;
const float vscale1 = ((const float*) w)[1];
vfpacc0x1 *= vscale1;
w = (const void*) ((const float*) w + 2);
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0);
int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1);
const int32_t vmagic_bias_less_zero_point = params->fp32_armsimd32.magic_bias_less_zero_point;
vout0x0 = __qsub(vout0x0, vmagic_bias_less_zero_point);
vout0x1 = __qsub(vout0x1, vmagic_bias_less_zero_point);
vout0x0 = __ssat(vout0x0, 8);
vout0x1 = __ssat(vout0x1, 8);
const uint32_t vout0 = (uint32_t) (uint8_t) vout0x0 | ((uint32_t) vout0x1 << 8);
uint32_t vout = vout0;
const int8x4_t voutput_min = (int8x4_t) params->fp32_armsimd32.output_min;
__ssub8((int8x4_t) vout, voutput_min);
vout = (uint32_t) __sel((uint8x4_t) vout, (uint8x4_t) voutput_min);
const int8x4_t voutput_max = (int8x4_t) params->fp32_armsimd32.output_max;
__ssub8((int8x4_t) vout, voutput_max);
vout = (uint32_t) __sel((uint8x4_t) voutput_max, (uint8x4_t) vout);
if XNN_LIKELY(nc >= 2) {
unaligned_store_u16(c0, (uint16_t) vout);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
nc -= 2;
} else {
*c0 = (int8_t) vout;
nc = 0;
}
} while (nc != 0);
}
| 3,521
| 28.107438
| 98
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-1x4-minmax-fp32-scalar-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x4__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
int32_t vacc0x2 = ((const int32_t*) w)[2];
int32_t vacc0x3 = ((const int32_t*) w)[3];
w = (const int32_t*) w + 4;
size_t k = kc;
do {
const int32_t va0 = (int32_t) *a0++;
const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
w = (const int8_t*) w + 4;
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
vacc0x2 += va0 * vb2;
vacc0x3 += va0 * vb3;
k -= sizeof(int8_t);
} while (k != 0);
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
float vfpacc0x2 = (float) vacc0x2;
float vfpacc0x3 = (float) vacc0x3;
const float vscale0 = ((const float*) w)[0];
vfpacc0x0 *= vscale0;
const float vscale1 = ((const float*) w)[1];
vfpacc0x1 *= vscale1;
const float vscale2 = ((const float*) w)[2];
vfpacc0x2 *= vscale2;
const float vscale3 = ((const float*) w)[3];
vfpacc0x3 *= vscale3;
w = (const void*) ((const float*) w + 4);
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
vfpacc0x3 = math_max_f32(vfpacc0x3, voutput_min_less_zero_point);
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
vfpacc0x3 = math_min_f32(vfpacc0x3, voutput_max_less_zero_point);
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc0x2 += vmagic_bias;
vfpacc0x3 += vmagic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout0x2 = (int32_t) float_as_uint32(vfpacc0x2) - vmagic_bias_less_output_zero_point;
int32_t vout0x3 = (int32_t) float_as_uint32(vfpacc0x3) - vmagic_bias_less_output_zero_point;
if XNN_LIKELY(nc >= 4) {
c0[0] = (int8_t) vout0x0;
c0[1] = (int8_t) vout0x1;
c0[2] = (int8_t) vout0x2;
c0[3] = (int8_t) vout0x3;
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
nc -= 4;
} else {
if (nc & 2) {
c0[0] = (int8_t) vout0x0;
c0[1] = (int8_t) vout0x1;
vout0x0 = vout0x2;
c0 += 2;
}
if (nc & 1) {
c0[0] = (int8_t) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
| 4,047
| 31.126984
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-1x4-minmax-fp32-scalar-imagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x4__scalar_imagic(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
int32_t vacc0x2 = ((const int32_t*) w)[2];
int32_t vacc0x3 = ((const int32_t*) w)[3];
w = (const int32_t*) w + 4;
size_t k = kc;
do {
const int32_t va0 = (int32_t) *a0++;
const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
w = (const int8_t*) w + 4;
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
vacc0x2 += va0 * vb2;
vacc0x3 += va0 * vb3;
k -= sizeof(int8_t);
} while (k != 0);
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
float vfpacc0x2 = (float) vacc0x2;
float vfpacc0x3 = (float) vacc0x3;
const float vscale0 = ((const float*) w)[0];
vfpacc0x0 *= vscale0;
const float vscale1 = ((const float*) w)[1];
vfpacc0x1 *= vscale1;
const float vscale2 = ((const float*) w)[2];
vfpacc0x2 *= vscale2;
const float vscale3 = ((const float*) w)[3];
vfpacc0x3 *= vscale3;
w = (const void*) ((const float*) w + 4);
const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc0x2 += vmagic_bias;
vfpacc0x3 += vmagic_bias;
int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0);
int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1);
int32_t vout0x2 = (int32_t) float_as_uint32(vfpacc0x2);
int32_t vout0x3 = (int32_t) float_as_uint32(vfpacc0x3);
const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
vout0x0 = math_max_s32(vout0x0, vmagic_min);
vout0x1 = math_max_s32(vout0x1, vmagic_min);
vout0x2 = math_max_s32(vout0x2, vmagic_min);
vout0x3 = math_max_s32(vout0x3, vmagic_min);
const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
vout0x0 = math_min_s32(vout0x0, vmagic_max);
vout0x1 = math_min_s32(vout0x1, vmagic_max);
vout0x2 = math_min_s32(vout0x2, vmagic_max);
vout0x3 = math_min_s32(vout0x3, vmagic_max);
const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
vout0x0 -= vmagic_bias_less_zero_point;
vout0x1 -= vmagic_bias_less_zero_point;
vout0x2 -= vmagic_bias_less_zero_point;
vout0x3 -= vmagic_bias_less_zero_point;
if XNN_LIKELY(nc >= 4) {
c0[0] = (int8_t) vout0x0;
c0[1] = (int8_t) vout0x1;
c0[2] = (int8_t) vout0x2;
c0[3] = (int8_t) vout0x3;
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
nc -= 4;
} else {
if (nc & 2) {
c0[0] = (int8_t) vout0x0;
c0[1] = (int8_t) vout0x1;
vout0x0 = vout0x2;
c0 += 2;
}
if (nc & 1) {
c0[0] = (int8_t) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
| 3,830
| 28.244275
| 102
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-1x4-minmax-fp32-scalar-lrintf.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x4__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
int32_t vacc0x2 = ((const int32_t*) w)[2];
int32_t vacc0x3 = ((const int32_t*) w)[3];
w = (const int32_t*) w + 4;
size_t k = kc;
do {
const int32_t va0 = (int32_t) *a0++;
const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
w = (const int8_t*) w + 4;
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
vacc0x2 += va0 * vb2;
vacc0x3 += va0 * vb3;
k -= sizeof(int8_t);
} while (k != 0);
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
float vfpacc0x2 = (float) vacc0x2;
float vfpacc0x3 = (float) vacc0x3;
const float vscale0 = ((const float*) w)[0];
vfpacc0x0 *= vscale0;
const float vscale1 = ((const float*) w)[1];
vfpacc0x1 *= vscale1;
const float vscale2 = ((const float*) w)[2];
vfpacc0x2 *= vscale2;
const float vscale3 = ((const float*) w)[3];
vfpacc0x3 *= vscale3;
w = (const void*) ((const float*) w + 4);
const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
vfpacc0x3 = math_max_f32(vfpacc0x3, voutput_min_less_zero_point);
const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
vfpacc0x3 = math_min_f32(vfpacc0x3, voutput_max_less_zero_point);
const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
const int32_t vrndacc0x2 = (int32_t) lrintf(vfpacc0x2);
const int32_t vrndacc0x3 = (int32_t) lrintf(vfpacc0x3);
const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
int32_t vout0x2 = vrndacc0x2 + voutput_zero_point;
int32_t vout0x3 = vrndacc0x3 + voutput_zero_point;
if XNN_LIKELY(nc >= 4) {
c0[0] = (int8_t) vout0x0;
c0[1] = (int8_t) vout0x1;
c0[2] = (int8_t) vout0x2;
c0[3] = (int8_t) vout0x3;
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
nc -= 4;
} else {
if (nc & 2) {
c0[0] = (int8_t) vout0x0;
c0[1] = (int8_t) vout0x1;
vout0x0 = vout0x2;
c0 += 2;
}
if (nc & 1) {
c0[0] = (int8_t) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
| 3,916
| 30.087302
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-1x4-minmax-fp32-wasm-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x4__wasm_fmagic(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
int32_t vacc0x2 = ((const int32_t*) w)[2];
int32_t vacc0x3 = ((const int32_t*) w)[3];
w = (const int32_t*) w + 4;
size_t k = kc;
do {
const int32_t va0 = (int32_t) *a0++;
const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
w = (const int8_t*) w + 4;
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
vacc0x2 += va0 * vb2;
vacc0x3 += va0 * vb3;
k -= sizeof(int8_t);
} while (k != 0);
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
float vfpacc0x2 = (float) vacc0x2;
float vfpacc0x3 = (float) vacc0x3;
const float vscale0 = ((const float*) w)[0];
vfpacc0x0 *= vscale0;
const float vscale1 = ((const float*) w)[1];
vfpacc0x1 *= vscale1;
const float vscale2 = ((const float*) w)[2];
vfpacc0x2 *= vscale2;
const float vscale3 = ((const float*) w)[3];
vfpacc0x3 *= vscale3;
w = (const void*) ((const float*) w + 4);
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = __builtin_wasm_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = __builtin_wasm_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = __builtin_wasm_max_f32(vfpacc0x2, voutput_min_less_zero_point);
vfpacc0x3 = __builtin_wasm_max_f32(vfpacc0x3, voutput_min_less_zero_point);
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = __builtin_wasm_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = __builtin_wasm_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = __builtin_wasm_min_f32(vfpacc0x2, voutput_max_less_zero_point);
vfpacc0x3 = __builtin_wasm_min_f32(vfpacc0x3, voutput_max_less_zero_point);
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc0x2 += vmagic_bias;
vfpacc0x3 += vmagic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout0x2 = (int32_t) float_as_uint32(vfpacc0x2) - vmagic_bias_less_output_zero_point;
int32_t vout0x3 = (int32_t) float_as_uint32(vfpacc0x3) - vmagic_bias_less_output_zero_point;
if XNN_LIKELY(nc >= 4) {
c0[0] = (int8_t) vout0x0;
c0[1] = (int8_t) vout0x1;
c0[2] = (int8_t) vout0x2;
c0[3] = (int8_t) vout0x3;
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
nc -= 4;
} else {
if (nc & 2) {
c0[0] = (int8_t) vout0x0;
c0[1] = (int8_t) vout0x1;
vout0x0 = vout0x2;
c0 += 2;
}
if (nc & 1) {
c0[0] = (int8_t) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
| 4,125
| 31.746032
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-1x4c2-minmax-fp32-avx-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x4c2__avx_ld128(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 = (const int8_t*) ((uintptr_t) a0 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
if (k > 2 * sizeof(int8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
if (k > 4 * sizeof(int8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
}
}
}
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 4,850
| 31.557047
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-1x4c2-minmax-fp32-avx-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x4c2__avx_ld64(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 = (const int8_t*) ((uintptr_t) a0 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
if (k > 2 * sizeof(int8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
if (k > 4 * sizeof(int8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
}
}
}
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 4,964
| 31.880795
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-1x4c2-minmax-fp32-sse2-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x4c2__sse2_ld128(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
a0 += 8;
const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
const __m128i vsb01 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb01);
const __m128i vxb0 = _mm_unpacklo_epi8(vb01, vsb01);
const __m128i vxb1 = _mm_unpackhi_epi8(vb01, vsb01);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vsb23 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb23);
const __m128i vxb2 = _mm_unpacklo_epi8(vb23, vsb23);
const __m128i vxb3 = _mm_unpackhi_epi8(vb23, vsb23);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
a0 = (const int8_t*) ((uintptr_t) a0 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_srai_epi16(_mm_unpacklo_epi8(vb0, vb0), 8);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
if (k > 2 * sizeof(int8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb1 = _mm_srai_epi16(_mm_unpacklo_epi8(vb1, vb1), 8);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
if (k > 4 * sizeof(int8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb2 = _mm_srai_epi16(_mm_unpacklo_epi8(vb2, vb2), 8);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
}
}
}
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
vacc00x0123 = _mm_max_epi16(vacc00x0123, voutput_min);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_cvtsi128_si32(vout);
}
nc = 0;
}
} while (nc != 0);
}
| 5,149
| 32.881579
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-1x4c2-minmax-fp32-sse2-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x4c2__sse2_ld64(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
a0 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_srai_epi16(_mm_unpacklo_epi8(vb0, vb0), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_srai_epi16(_mm_unpacklo_epi8(vb1, vb1), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_srai_epi16(_mm_unpacklo_epi8(vb2, vb2), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_srai_epi16(_mm_unpacklo_epi8(vb3, vb3), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
a0 = (const int8_t*) ((uintptr_t) a0 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_srai_epi16(_mm_unpacklo_epi8(vb0, vb0), 8);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
if (k > 2 * sizeof(int8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb1 = _mm_srai_epi16(_mm_unpacklo_epi8(vb1, vb1), 8);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
if (k > 4 * sizeof(int8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb2 = _mm_srai_epi16(_mm_unpacklo_epi8(vb2, vb2), 8);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
}
}
}
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
vacc00x0123 = _mm_max_epi16(vacc00x0123, voutput_min);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_cvtsi128_si32(vout);
}
nc = 0;
}
} while (nc != 0);
}
| 5,239
| 33.473684
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-1x4c2-minmax-fp32-sse41-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x4c2__sse41_ld128(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 = (const int8_t*) ((uintptr_t) a0 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
if (k > 2 * sizeof(int8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
if (k > 4 * sizeof(int8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
}
}
}
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 4,852
| 31.57047
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-1x4c2-minmax-fp32-sse41-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x4c2__sse41_ld64(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 = (const int8_t*) ((uintptr_t) a0 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
if (k > 2 * sizeof(int8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
if (k > 4 * sizeof(int8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
}
}
}
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 4,966
| 31.89404
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-1x4c2-minmax-fp32-wasmsimd-dot16x2-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2-wasmsimd-dot16x2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x4c2__wasmsimd_dot16x2_ld128(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
v128_t vacc0x0123 = wasm_v128_load(w);
w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const v128_t vxa0 = wasm_i16x8_load8x8((const v128_t*) a0);
a0 += 8;
const v128_t vb01 = wasm_v128_load(w);
const v128_t vxb0 = wasm_i16x8_extend_low_i8x16(vb01);
const v128_t vxb1 = wasm_i16x8_extend_high_i8x16(vb01);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0));
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1));
const v128_t vb23 = wasm_v128_load((const int8_t*) w + 16);
const v128_t vxb2 = wasm_i16x8_extend_low_i8x16(vb23);
const v128_t vxb3 = wasm_i16x8_extend_high_i8x16(vb23);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2));
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 3, 3, 3, 3), vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
}
if (k != 0) {
const v128_t vxa0 = wasm_i16x8_load8x8(a0);
a0 = (const int8_t*) ((uintptr_t) a0 + k);
const v128_t vxb0 = wasm_i16x8_load8x8(w);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0));
if (k > 2 * sizeof(int8_t)) {
const v128_t vxb1 = wasm_i16x8_load8x8(w);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1));
if (k > 4 * sizeof(int8_t)) {
const v128_t vxb2 = wasm_i16x8_load8x8(w);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2));
}
}
}
vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123);
const v128_t vscale0123 = wasm_v128_load(w);
w = (const void*) ((const float*) w + 4);
vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale0123);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min);
const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point);
v128_t vacc00x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc0x0123);
v128_t vout = wasm_i8x16_narrow_i16x8(vacc00x0123, vacc00x0123);
const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
vout = wasm_i8x16_min(vout, voutput_max);
if (nc >= 4) {
wasm_v128_store32_lane(c0, vout, 0);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
wasm_v128_store16_lane(c0, vout, 0);
c0 += 2;
vout = wasm_u32x4_shr(vout, 16);
}
if (nc & 1) {
wasm_v128_store8_lane(c0, vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 4,583
| 30.183673
| 134
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-1x4c2-minmax-fp32-wasmsimd-dot16x2-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2-wasmsimd-dot16x2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x4c2__wasmsimd_dot16x2_ld64(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
v128_t vacc0x0123 = wasm_v128_load(w);
w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const v128_t vxa0 = wasm_i16x8_load8x8((const v128_t*) a0);
a0 += 8;
const v128_t vxb0 = wasm_i16x8_load8x8(w);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0));
const v128_t vxb1 = wasm_i16x8_load8x8((const int8_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1));
const v128_t vxb2 = wasm_i16x8_load8x8((const int8_t*) w + 16);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2));
const v128_t vxb3 = wasm_i16x8_load8x8((const int8_t*) w + 24);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 3, 3, 3, 3), vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
}
if (k != 0) {
const v128_t vxa0 = wasm_i16x8_load8x8(a0);
a0 = (const int8_t*) ((uintptr_t) a0 + k);
const v128_t vxb0 = wasm_i16x8_load8x8(w);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0));
if (k > 2 * sizeof(int8_t)) {
const v128_t vxb1 = wasm_i16x8_load8x8(w);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1));
if (k > 4 * sizeof(int8_t)) {
const v128_t vxb2 = wasm_i16x8_load8x8(w);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2));
}
}
}
vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123);
const v128_t vscale0123 = wasm_v128_load(w);
w = (const void*) ((const float*) w + 4);
vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale0123);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min);
const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point);
v128_t vacc00x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc0x0123);
v128_t vout = wasm_i8x16_narrow_i16x8(vacc00x0123, vacc00x0123);
const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
vout = wasm_i8x16_min(vout, voutput_max);
if (nc >= 4) {
wasm_v128_store32_lane(c0, vout, 0);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
wasm_v128_store16_lane(c0, vout, 0);
c0 += 2;
vout = wasm_u32x4_shr(vout, 16);
}
if (nc & 1) {
wasm_v128_store8_lane(c0, vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 4,483
| 29.924138
| 134
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-1x4c2-minmax-fp32-xop-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x4c2__xop_ld128(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc0x0123);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc0x0123);
const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc0x0123);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc0x0123);
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 = (const int8_t*) ((uintptr_t) a0 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc0x0123);
if (k > 2 * sizeof(int8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc0x0123);
if (k > 4 * sizeof(int8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc0x0123);
}
}
}
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 4,812
| 30.457516
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-1x4c2-minmax-fp32-xop-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x4c2__xop_ld64(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc0x0123);
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc0x0123);
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc0x0123);
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc0x0123);
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 = (const int8_t*) ((uintptr_t) a0 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc0x0123);
if (k > 2 * sizeof(int8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc0x0123);
if (k > 4 * sizeof(int8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc0x0123);
}
}
}
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 4,926
| 30.787097
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-1x4c2s4-minmax-fp32-avx-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x4c2s4__avx_ld128(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
do {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
} while (k != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 3,633
| 29.79661
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-gemm/gen/qs8-qc8w-gemm-1x4c2s4-minmax-fp32-avx-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_gemm_minmax_fp32_ukernel_1x4c2s4__avx_ld64(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
do {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
} while (k != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 3,747
| 30.233333
| 108
|
c
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.