repo
stringlengths 1
152
⌀ | file
stringlengths 14
221
| code
stringlengths 501
25k
| file_length
int64 501
25k
| avg_line_length
float64 20
99.5
| max_line_length
int64 21
134
| extension_type
stringclasses 2
values |
|---|---|---|---|---|---|---|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-5f5m5l4c1s1r-minmax-fp32-scalar-lrintf.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QS8 depthwise-convolution microkernel: signed 8-bit inputs/weights/outputs,
// fp32 requantization using lrintf for rounding, min/max output clamping.
// Handles kernel_size > 5 in multiple passes of 5 taps each ("5f5m5l"):
//   - first pass:  seeds the int32 scratch buffer with the per-channel bias
//                  and accumulates the first 5 taps;
//   - middle pass: accumulates 5 more taps into the buffer per iteration;
//   - last pass:   accumulates the final taps, requantizes, and emits int8.
// Channels are unrolled 4 at a time ("4c") with a 1-channel scalar remainder
// loop ("1s1r").
//
// channels      - number of channels (must be non-zero)
// output_width  - number of output pixels to produce (must be non-zero)
// input         - per-output-pixel array of input-row pointers; advanced by
//                 input_stride bytes after each output pixel
// weights       - packed weights; judging by the pointer arithmetic below,
//                 each 4-channel group is 4 int32 biases followed by int8
//                 taps in the first pass, taps only in later passes
//                 (NOTE(review): confirm against multipass-scalar.c.in)
// zero          - sentinel row for padding: rows equal to it skip the
//                 input_offset adjustment
// buffer        - scratch of at least `channels` int32 accumulators, live
//                 across the passes of a single output pixel
void xnn_qs8_dwconv_minmax_fp32_ukernel_5f5m5l4c1s1r__scalar_lrintf(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
// Requantization constants: min/max are pre-shifted by the output zero
// point so clamping happens before the zero point is added back.
const float vscale = params->fp32_scalar_lrintf.scale;
const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
// One iteration per output pixel.
do {
const void* w = weights;
// First pass to process 5 inputs.
{
int32_t* b = buffer;
// Rows that alias `zero` are padding and are not offset.
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
// Main loop: 4 channels per iteration; bias comes from the weights.
for (; c >= 4; c -= 4) {
int32_t vacc0 = ((const int32_t*) w)[0];
int32_t vacc1 = ((const int32_t*) w)[1];
int32_t vacc2 = ((const int32_t*) w)[2];
int32_t vacc3 = ((const int32_t*) w)[3];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
// Taps follow the 4 int32 biases in the packed weight group.
const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[1];
const int32_t vk0x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[2];
const int32_t vk0x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[3];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
const int32_t vi1x2 = (int32_t) i1[2];
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[4];
const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[5];
const int32_t vk1x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[6];
const int32_t vk1x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[7];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
const int32_t vi2x2 = (int32_t) i2[2];
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[8];
const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[9];
const int32_t vk2x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[10];
const int32_t vk2x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[11];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
const int32_t vi3x2 = (int32_t) i3[2];
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[12];
const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[13];
const int32_t vk3x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[14];
const int32_t vk3x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[15];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
const int32_t vi4x2 = (int32_t) i4[2];
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[16];
const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[17];
const int32_t vk4x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[18];
const int32_t vk4x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[19];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
// Advance past this group: 4 biases + 5 taps x 4 channels.
w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t) + 20 * sizeof(int8_t));
// Spill partial sums; middle/last passes continue from here.
b[0] = vacc0;
b[1] = vacc1;
b[2] = vacc2;
b[3] = vacc3;
b += 4;
}
// Remainder: one channel at a time (1 bias + 5 taps per group).
if XNN_UNLIKELY(c != 0) {
do {
int32_t vacc = *((const int32_t*) w);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 5 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
}
// Middle pass to process 5 inputs in each iteration.
// Runs while more than 5 taps remain; the final <=5 go to the last pass.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
for (; c >= 4; c -= 4) {
// Resume from the buffered partial sums (no bias in middle passes).
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
int32_t vacc2 = b[2];
int32_t vacc3 = b[3];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
const int32_t vk0x2 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk0x3 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
const int32_t vi1x2 = (int32_t) i1[2];
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[5];
const int32_t vk1x2 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk1x3 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
const int32_t vi2x2 = (int32_t) i2[2];
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[9];
const int32_t vk2x2 = (int32_t) ((const int8_t*) w)[10];
const int32_t vk2x3 = (int32_t) ((const int8_t*) w)[11];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
const int32_t vi3x2 = (int32_t) i3[2];
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[12];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[13];
const int32_t vk3x2 = (int32_t) ((const int8_t*) w)[14];
const int32_t vk3x3 = (int32_t) ((const int8_t*) w)[15];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
const int32_t vi4x2 = (int32_t) i4[2];
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[16];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[17];
const int32_t vk4x2 = (int32_t) ((const int8_t*) w)[18];
const int32_t vk4x3 = (int32_t) ((const int8_t*) w)[19];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
w = (const void*) ((uintptr_t) w + 20 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b[2] = vacc2;
b[3] = vacc3;
b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32_t vacc = *b;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 5 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
}
// Last pass to process up to 5 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
int32_t vacc2 = b[2];
int32_t vacc3 = b[3];
b += 4;
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
const int32_t vk0x2 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk0x3 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
const int32_t vi1x2 = (int32_t) i1[2];
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[5];
const int32_t vk1x2 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk1x3 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
const int32_t vi2x2 = (int32_t) i2[2];
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[9];
const int32_t vk2x2 = (int32_t) ((const int8_t*) w)[10];
const int32_t vk2x3 = (int32_t) ((const int8_t*) w)[11];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
const int32_t vi3x2 = (int32_t) i3[2];
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[12];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[13];
const int32_t vk3x2 = (int32_t) ((const int8_t*) w)[14];
const int32_t vk3x3 = (int32_t) ((const int8_t*) w)[15];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
const int32_t vi4x2 = (int32_t) i4[2];
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[16];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[17];
const int32_t vk4x2 = (int32_t) ((const int8_t*) w)[18];
const int32_t vk4x3 = (int32_t) ((const int8_t*) w)[19];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
w = (const void*) ((uintptr_t) w + 20 * sizeof(int8_t));
// Requantize: scale to float, clamp in the zero-point-shifted domain,
// round with lrintf, then add the output zero point back.
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
float vfpacc2 = (float) vacc2;
float vfpacc3 = (float) vacc3;
vfpacc0 *= vscale;
vfpacc1 *= vscale;
vfpacc2 *= vscale;
vfpacc3 *= vscale;
vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc2 = math_max_f32(vfpacc2, voutput_min_less_zero_point);
vfpacc3 = math_max_f32(vfpacc3, voutput_min_less_zero_point);
vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
vfpacc2 = math_min_f32(vfpacc2, voutput_max_less_zero_point);
vfpacc3 = math_min_f32(vfpacc3, voutput_max_less_zero_point);
// lrintf rounds per the current FP rounding mode (round-to-nearest-even
// by default), matching the reference requantization.
const int32_t vrndacc0 = (int32_t) lrintf(vfpacc0);
const int32_t vrndacc1 = (int32_t) lrintf(vfpacc1);
const int32_t vrndacc2 = (int32_t) lrintf(vfpacc2);
const int32_t vrndacc3 = (int32_t) lrintf(vfpacc3);
int32_t vout0 = (int32_t) vrndacc0 + voutput_zero_point;
int32_t vout1 = (int32_t) vrndacc1 + voutput_zero_point;
int32_t vout2 = (int32_t) vrndacc2 + voutput_zero_point;
int32_t vout3 = (int32_t) vrndacc3 + voutput_zero_point;
// Clamping above guarantees vout fits in int8, so the cast is safe.
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output[2] = (int8_t) vout2;
output[3] = (int8_t) vout3;
output += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32_t vacc = *b++;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 5 * sizeof(int8_t));
// Same scale/clamp/round/shift sequence as the 4-channel loop.
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
const int32_t vrndacc = (int32_t) lrintf(vfpacc);
int32_t vout = vrndacc + voutput_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
}
}
// Step to the next output pixel's input-pointer array and output slot.
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 19,835
| 36.21576
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-5f5m5l4c1s1r-minmax-fp32-wasm-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QS8 depthwise-convolution microkernel for WebAssembly: fp32 requantization
// using the "fmagic" (magic-bias) float-to-int conversion and
// __builtin_wasm_min/max_f32 clamping.  Same multipass structure as the
// scalar variants: kernel_size > 5 is processed in passes of 5 taps
// ("5f5m5l") — first pass seeds the int32 scratch buffer with the bias,
// middle passes accumulate into it, the last pass requantizes and stores
// int8 outputs.  Channels unrolled 4 at a time ("4c") with a 1-channel
// remainder loop ("1s1r").
//
// Parameters mirror the scalar kernel: `input` is a per-output-pixel array
// of kernel_size row pointers (rows equal to `zero` are padding and skip
// input_offset); `weights` is packed as 4 int32 biases followed by int8
// taps per 4-channel first-pass group, taps only afterwards (NOTE(review):
// layout inferred from the pointer arithmetic; confirm against
// multipass-scalar.c.in); `buffer` holds at least `channels` int32
// accumulators across passes.
void xnn_qs8_dwconv_minmax_fp32_ukernel_5f5m5l4c1s1r__wasm_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
// fmagic requantization constants: after adding vmagic_bias, the rounded
// integer sits in the float's low mantissa bits; reinterpreting the bits
// and subtracting vmagic_bias_less_output_zero_point yields the final
// zero-point-adjusted result.
const float vscale = params->fp32_scalar_fmagic.scale;
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
// One iteration per output pixel.
do {
const void* w = weights;
// First pass to process 5 inputs.
{
int32_t* b = buffer;
// Rows that alias `zero` are padding and are not offset.
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
// Main loop: 4 channels per iteration; bias comes from the weights.
for (; c >= 4; c -= 4) {
int32_t vacc0 = ((const int32_t*) w)[0];
int32_t vacc1 = ((const int32_t*) w)[1];
int32_t vacc2 = ((const int32_t*) w)[2];
int32_t vacc3 = ((const int32_t*) w)[3];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
// Taps follow the 4 int32 biases in the packed weight group.
const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[1];
const int32_t vk0x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[2];
const int32_t vk0x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[3];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
const int32_t vi1x2 = (int32_t) i1[2];
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[4];
const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[5];
const int32_t vk1x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[6];
const int32_t vk1x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[7];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
const int32_t vi2x2 = (int32_t) i2[2];
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[8];
const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[9];
const int32_t vk2x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[10];
const int32_t vk2x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[11];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
const int32_t vi3x2 = (int32_t) i3[2];
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[12];
const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[13];
const int32_t vk3x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[14];
const int32_t vk3x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[15];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
const int32_t vi4x2 = (int32_t) i4[2];
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[16];
const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[17];
const int32_t vk4x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[18];
const int32_t vk4x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[19];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
// Advance past this group: 4 biases + 5 taps x 4 channels.
w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t) + 20 * sizeof(int8_t));
// Spill partial sums; middle/last passes continue from here.
b[0] = vacc0;
b[1] = vacc1;
b[2] = vacc2;
b[3] = vacc3;
b += 4;
}
// Remainder: one channel at a time (1 bias + 5 taps per group).
if XNN_UNLIKELY(c != 0) {
do {
int32_t vacc = *((const int32_t*) w);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 5 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
}
// Middle pass to process 5 inputs in each iteration.
// Runs while more than 5 taps remain; the final <=5 go to the last pass.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
for (; c >= 4; c -= 4) {
// Resume from the buffered partial sums (no bias in middle passes).
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
int32_t vacc2 = b[2];
int32_t vacc3 = b[3];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
const int32_t vk0x2 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk0x3 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
const int32_t vi1x2 = (int32_t) i1[2];
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[5];
const int32_t vk1x2 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk1x3 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
const int32_t vi2x2 = (int32_t) i2[2];
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[9];
const int32_t vk2x2 = (int32_t) ((const int8_t*) w)[10];
const int32_t vk2x3 = (int32_t) ((const int8_t*) w)[11];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
const int32_t vi3x2 = (int32_t) i3[2];
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[12];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[13];
const int32_t vk3x2 = (int32_t) ((const int8_t*) w)[14];
const int32_t vk3x3 = (int32_t) ((const int8_t*) w)[15];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
const int32_t vi4x2 = (int32_t) i4[2];
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[16];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[17];
const int32_t vk4x2 = (int32_t) ((const int8_t*) w)[18];
const int32_t vk4x3 = (int32_t) ((const int8_t*) w)[19];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
w = (const void*) ((uintptr_t) w + 20 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b[2] = vacc2;
b[3] = vacc3;
b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32_t vacc = *b;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 5 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
}
// Last pass to process up to 5 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
int32_t vacc2 = b[2];
int32_t vacc3 = b[3];
b += 4;
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
const int32_t vk0x2 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk0x3 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
const int32_t vi1x2 = (int32_t) i1[2];
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[5];
const int32_t vk1x2 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk1x3 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
const int32_t vi2x2 = (int32_t) i2[2];
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[9];
const int32_t vk2x2 = (int32_t) ((const int8_t*) w)[10];
const int32_t vk2x3 = (int32_t) ((const int8_t*) w)[11];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
const int32_t vi3x2 = (int32_t) i3[2];
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[12];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[13];
const int32_t vk3x2 = (int32_t) ((const int8_t*) w)[14];
const int32_t vk3x3 = (int32_t) ((const int8_t*) w)[15];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
const int32_t vi4x2 = (int32_t) i4[2];
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[16];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[17];
const int32_t vk4x2 = (int32_t) ((const int8_t*) w)[18];
const int32_t vk4x3 = (int32_t) ((const int8_t*) w)[19];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
w = (const void*) ((uintptr_t) w + 20 * sizeof(int8_t));
// Requantize: scale to float, clamp in the zero-point-shifted domain
// (WebAssembly f32.max/f32.min single-instruction builtins), then
// convert via the magic-bias trick below.
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
float vfpacc2 = (float) vacc2;
float vfpacc3 = (float) vacc3;
vfpacc0 *= vscale;
vfpacc1 *= vscale;
vfpacc2 *= vscale;
vfpacc3 *= vscale;
vfpacc0 = __builtin_wasm_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = __builtin_wasm_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc2 = __builtin_wasm_max_f32(vfpacc2, voutput_min_less_zero_point);
vfpacc3 = __builtin_wasm_max_f32(vfpacc3, voutput_min_less_zero_point);
vfpacc0 = __builtin_wasm_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = __builtin_wasm_min_f32(vfpacc1, voutput_max_less_zero_point);
vfpacc2 = __builtin_wasm_min_f32(vfpacc2, voutput_max_less_zero_point);
vfpacc3 = __builtin_wasm_min_f32(vfpacc3, voutput_max_less_zero_point);
// Magic-bias float->int: adding vmagic_bias places the rounded value
// in the low mantissa bits; reinterpreting the bits as int32 and
// subtracting vmagic_bias_less_output_zero_point recovers the result
// with the output zero point already folded in.
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
vfpacc2 += vmagic_bias;
vfpacc3 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
int32_t vout2 = (int32_t) float_as_uint32(vfpacc2) - vmagic_bias_less_output_zero_point;
int32_t vout3 = (int32_t) float_as_uint32(vfpacc3) - vmagic_bias_less_output_zero_point;
// Clamping above guarantees vout fits in int8, so the cast is safe.
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output[2] = (int8_t) vout2;
output[3] = (int8_t) vout3;
output += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32_t vacc = *b++;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 5 * sizeof(int8_t));
// Same scale/clamp/magic-bias sequence as the 4-channel loop.
float vfpacc = (float) vacc * vscale;
vfpacc = __builtin_wasm_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = __builtin_wasm_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
}
}
// Step to the next output pixel's input-pointer array and output slot.
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 20,045
| 36.609756
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-5f5m5l8c8s8r-minmax-fp32-neon-mul16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-neon-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
// QS8 depthwise convolution, fp32 requantization, NEON MUL16 variant.
//
// Multipass layout "5f5m5l8c8s8r": the kernel taps are consumed 5 at a time —
// a First pass of 5 taps, zero or more Middle passes of 5 taps, and a Last
// pass of up to 5 taps — processing 8 Channels per vector iteration.
// `buffer` carries the int32 per-channel accumulators between passes.
//
// Parameters:
//   input         - array of kernel_size row pointers per output pixel; rows
//                   equal to `zero` are the padding row and are NOT rebased by
//                   input_offset.
//   weights       - packed blob: per 8-channel group, 8 int32 biases followed
//                   by kernel_size groups of 8 int8 taps.
//   buffer        - scratch of round_up_po2(channels, 8) int32 accumulators.
//   XNN_OOB_READS - the channel-remainder path loads a full 8 lanes even when
//                   c < 8; the extra lanes are computed and discarded.
void xnn_qs8_dwconv_minmax_fp32_ukernel_5f5m5l8c8s8r__neon_mul16(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);
  assert(kernel_size > 5);  // <= 5 taps would be handled by a unipass kernel

  // Requantization constants.  This variant uses the "magic bias" trick:
  // adding magic_bias to a scaled float places the rounded integer in the
  // low mantissa bits, so a bit-reinterpret + saturating subtract yields the
  // rounded, zero-point-adjusted int32 without a float->int instruction.
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
  const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
  const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neon.output_min);
  const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neon.output_max);
  do {
    const void* w = weights;

    // First pass to process 5 inputs: initialize accumulators from the bias
    // words in `w`, add 5 taps, and spill the accumulators to `buffer`.
    {
      int32_t* b = buffer;
      // Rebase each non-padding row pointer by input_offset.
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      input += 5;

      // Buffer passes always run over the rounded-up channel count so the
      // last pass can reload whole 8-channel groups.
      size_t c = round_up_po2(channels, 8);
      for (; c >= 8; c -= 8) {
        // Seed the 8 accumulators with the packed int32 biases.
        int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
        int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;

        // Each tap: widen int8 inputs/weights to int16, then widening
        // multiply-accumulate into the two int32x4 halves.
        const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
        const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
        const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
        const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
        const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
        const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
        const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
        const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
        const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
        const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));

        // Spill partial sums for the next pass.
        vst1q_s32(b, vacc0123); b += 4;
        vst1q_s32(b, vacc4567); b += 4;
      }

      assert(c == 0);
    }

    // Middle pass to process 5 inputs in each iteration: reload partial sums
    // from `buffer`, add 5 more taps, and store them back.
    for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
      int32_t* b = buffer;
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      input += 5;

      size_t c = round_up_po2(channels, 8);
      for (; c >= 8; c -= 8) {
        // Resume from the spilled accumulators (stores below advance b).
        int32x4_t vacc0123 = vld1q_s32(b);
        int32x4_t vacc4567 = vld1q_s32(b + 4);

        const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
        const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
        const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
        const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
        const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
        const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
        const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
        const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
        const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
        const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));

        vst1q_s32(b, vacc0123); b += 4;
        vst1q_s32(b, vacc4567); b += 4;
      }
      assert(c == 0);
    }

    // Last pass to process up to 5 inputs: add the final taps, requantize,
    // clamp, and store int8 outputs.
    {
      const int32_t* b = buffer;
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }

      // Only the last pass iterates over the true channel count; the
      // remainder (c % 8) falls through to the partial-store path below.
      size_t c = channels;
      for (; c >= 8; c -= 8) {
        int32x4_t vacc0123 = vld1q_s32(b); b += 4;
        int32x4_t vacc4567 = vld1q_s32(b); b += 4;

        const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
        const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
        const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
        const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
        const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
        const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
        const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
        const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
        const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
        const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));

        // Requantize: int32 -> float, scale, then magic-bias round-to-nearest
        // (bit-reinterpret + saturating subtract folds in the zero point).
        float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
        float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);

        vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
        vfpacc4567 = vmulq_f32(vfpacc4567, vscale);

        vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
        vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));

        vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
        vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);

#if XNN_ARCH_ARM64
        // AArch64 has the fused high-half narrow.
        int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);

        int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#else  // !XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));

        int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#endif  // !XNN_ARCH_ARM64

        // Clamp to the fused activation range.
        vout01234567 = vmax_s8(vout01234567, voutput_min);

        vout01234567 = vmin_s8(vout01234567, voutput_max);

        vst1_s8(output, vout01234567); output += 8;
      }
      if XNN_UNLIKELY(c != 0) {
        {
          // Remainder of 1..7 channels: compute a full 8-lane group (reads
          // past c are sanctioned by XNN_OOB_READS / buffer rounding), then
          // store only c bytes via the 4/2/1 tail below.
          int32x4_t vacc0123 = vld1q_s32(b); b += 4;
          int32x4_t vacc4567 = vld1q_s32(b); b += 4;

          const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0));
          const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
          const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1));
          const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
          const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2));
          const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
          const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3));
          const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
          const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4));
          const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));

          float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
          float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);

          vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
          vfpacc4567 = vmulq_f32(vfpacc4567, vscale);

          vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
          vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));

          vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
          vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);

#if XNN_ARCH_ARM64
          int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
          int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif

          int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
          vout01234567 = vmax_s8(vout01234567, voutput_min);
          vout01234567 = vmin_s8(vout01234567, voutput_max);

          // Store 4, then 2, then 1 bytes, rotating consumed lanes out.
          if (c & 4) {
            vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
            vout01234567 = vext_s8(vout01234567, vout01234567, 4);
          }
          if (c & 2) {
            vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
            vout01234567 = vext_s8(vout01234567, vout01234567, 2);
          }
          if (c & 1) {
            vst1_lane_s8(output, vout01234567, 0); output += 1;
          }
        }
      }
    }

    // Advance to the next output pixel's row-pointer group and output slot.
    input = (const int8_t**) ((uintptr_t) input + input_stride);
    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 15,019
| 40.150685
| 123
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-5f5m5l8c8s8r-minmax-fp32-neonv8-mul16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-neon-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
// QS8 depthwise convolution, fp32 requantization, NEON v8 (ARMv8) MUL16
// variant.  Identical multipass structure to the plain-NEON kernel
// ("5f5m5l" taps, 8 channels per iteration, int32 scratch in `buffer`), but
// requantizes with the ARMv8 round-to-nearest-even convert (vcvtnq_s32_f32)
// instead of the magic-bias trick, and adds the output zero point after
// narrowing to int16.
//
// `zero` is the padding row; rows equal to it are NOT rebased by
// input_offset.  XNN_OOB_READS sanctions the full-8-lane loads in the
// channel-remainder path.
void xnn_qs8_dwconv_minmax_fp32_ukernel_5f5m5l8c8s8r__neonv8_mul16(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);
  assert(kernel_size > 5);  // <= 5 taps would be handled by a unipass kernel

  // Requantization constants for the neonv8 path.
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
  const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neonv8.output_min);
  const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neonv8.output_max);
  do {
    const void* w = weights;

    // First pass to process 5 inputs: seed accumulators from the packed
    // biases, add 5 taps, and spill to `buffer`.
    {
      int32_t* b = buffer;
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      input += 5;

      // Buffer passes run over the rounded-up channel count so the last
      // pass can reload whole 8-channel groups.
      size_t c = round_up_po2(channels, 8);
      for (; c >= 8; c -= 8) {
        int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
        int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;

        // Per tap: widen int8 -> int16, widening multiply-accumulate.
        const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
        const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
        const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
        const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
        const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
        const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
        const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
        const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
        const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
        const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));

        vst1q_s32(b, vacc0123); b += 4;
        vst1q_s32(b, vacc4567); b += 4;
      }

      assert(c == 0);
    }

    // Middle pass to process 5 inputs in each iteration: reload partial
    // sums, add 5 more taps, store back.
    for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
      int32_t* b = buffer;
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      input += 5;

      size_t c = round_up_po2(channels, 8);
      for (; c >= 8; c -= 8) {
        // Resume from spilled accumulators (stores below advance b).
        int32x4_t vacc0123 = vld1q_s32(b);
        int32x4_t vacc4567 = vld1q_s32(b + 4);

        const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
        const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
        const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
        const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
        const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
        const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
        const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
        const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
        const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
        const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));

        vst1q_s32(b, vacc0123); b += 4;
        vst1q_s32(b, vacc4567); b += 4;
      }
      assert(c == 0);
    }

    // Last pass to process up to 5 inputs: final taps, requantize, clamp,
    // store int8 outputs.
    {
      const int32_t* b = buffer;
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }

      // Only the last pass iterates over the true channel count; the
      // remainder (c % 8) falls through to the partial-store path below.
      size_t c = channels;
      for (; c >= 8; c -= 8) {
        int32x4_t vacc0123 = vld1q_s32(b); b += 4;
        int32x4_t vacc4567 = vld1q_s32(b); b += 4;

        const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
        const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
        const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
        const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
        const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
        const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
        const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
        const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
        const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
        const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));

        // Requantize: int32 -> float, scale, then ARMv8 round-to-nearest-even
        // convert back to int32 (FCVTNS).
        float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
        float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);

        vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
        vfpacc4567 = vmulq_f32(vfpacc4567, vscale);

        vacc0123 = vcvtnq_s32_f32(vfpacc0123);
        vacc4567 = vcvtnq_s32_f32(vfpacc4567);

#if XNN_ARCH_ARM64
        // AArch64 has the fused high-half narrow.
        int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);

        // Zero point is applied (saturating) after narrowing to int16.
        vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);

        int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#else  // !XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));

        vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);

        int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#endif  // !XNN_ARCH_ARM64

        // Clamp to the fused activation range.
        vout01234567 = vmax_s8(vout01234567, voutput_min);

        vout01234567 = vmin_s8(vout01234567, voutput_max);

        vst1_s8(output, vout01234567); output += 8;
      }
      if XNN_UNLIKELY(c != 0) {
        {
          // Remainder of 1..7 channels: compute a full 8-lane group (reads
          // past c sanctioned by XNN_OOB_READS / buffer rounding), then
          // store only c bytes via the 4/2/1 tail below.
          int32x4_t vacc0123 = vld1q_s32(b); b += 4;
          int32x4_t vacc4567 = vld1q_s32(b); b += 4;

          const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0));
          const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
          const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1));
          const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
          const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2));
          const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
          const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3));
          const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
          const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4));
          const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));

          float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
          float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);

          vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
          vfpacc4567 = vmulq_f32(vfpacc4567, vscale);

          vacc0123 = vcvtnq_s32_f32(vfpacc0123);
          vacc4567 = vcvtnq_s32_f32(vfpacc4567);

#if XNN_ARCH_ARM64
          int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
          int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif

          vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
          int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
          vout01234567 = vmax_s8(vout01234567, voutput_min);
          vout01234567 = vmin_s8(vout01234567, voutput_max);

          // Store 4, then 2, then 1 bytes, rotating consumed lanes out.
          if (c & 4) {
            vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
            vout01234567 = vext_s8(vout01234567, vout01234567, 4);
          }
          if (c & 2) {
            vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
            vout01234567 = vext_s8(vout01234567, vout01234567, 2);
          }
          if (c & 1) {
            vst1_lane_s8(output, vout01234567, 0); output += 1;
          }
        }
      }
    }

    // Advance to the next output pixel's row-pointer group and output slot.
    input = (const int8_t**) ((uintptr_t) input + input_stride);
    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 14,729
| 39.690608
| 99
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-5f5m5l8c8s8r-minmax-fp32-sse2-mul16-add16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-sse-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_dwconv_minmax_fp32_ukernel_5f5m5l8c8s8r__sse2_mul16_add16(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
do {
const void* w = weights;
// First pass to process 5 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
__m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(int8_t)));
i0 += 8;
const __m128i vxi0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi0x01234567, vi0x01234567), 8);
const __m128i vxk0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk0x01234567, vk0x01234567), 8);
__m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(int8_t)));
i1 += 8;
const __m128i vxi1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi1x01234567, vi1x01234567), 8);
const __m128i vxk1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk1x01234567, vk1x01234567), 8);
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi1x01234567, vxk1x01234567));
const __m128i vsignprod1x01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vprod01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod01234567, vsignprod1x01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod01234567, vsignprod1x01234567));
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(int8_t)));
i2 += 8;
const __m128i vxi2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi2x01234567, vi2x01234567), 8);
const __m128i vxk2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk2x01234567, vk2x01234567), 8);
vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(int8_t)));
i3 += 8;
const __m128i vxi3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi3x01234567, vi3x01234567), 8);
const __m128i vxk3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk3x01234567, vk3x01234567), 8);
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi3x01234567, vxk3x01234567));
const __m128i vsignprod3x01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vprod01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod01234567, vsignprod3x01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod01234567, vsignprod3x01234567));
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(int8_t)));
i4 += 8;
const __m128i vxi4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi4x01234567, vi4x01234567), 8);
const __m128i vxk4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk4x01234567, vk4x01234567), 8);
vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
const __m128i vsignprod4x01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vprod01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod01234567, vsignprod4x01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod01234567, vsignprod4x01234567));
w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(int8_t));
_mm_storeu_si128((__m128i*) b, vacc0123);
_mm_storeu_si128((__m128i*) (b + 4), vacc4567);
b += 8;
}
assert(c == 0);
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
__m128i vacc0123 = _mm_loadu_si128((const __m128i*) b);
__m128i vacc4567 = _mm_loadu_si128((const __m128i*) (b + 4));
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 0 * sizeof(int8_t)));
i0 += 8;
const __m128i vxi0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi0x01234567, vi0x01234567), 8);
const __m128i vxk0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk0x01234567, vk0x01234567), 8);
__m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int8_t)));
i1 += 8;
const __m128i vxi1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi1x01234567, vi1x01234567), 8);
const __m128i vxk1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk1x01234567, vk1x01234567), 8);
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi1x01234567, vxk1x01234567));
const __m128i vsignprod1x01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vprod01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod01234567, vsignprod1x01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod01234567, vsignprod1x01234567));
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int8_t)));
i2 += 8;
const __m128i vxi2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi2x01234567, vi2x01234567), 8);
const __m128i vxk2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk2x01234567, vk2x01234567), 8);
vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int8_t)));
i3 += 8;
const __m128i vxi3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi3x01234567, vi3x01234567), 8);
const __m128i vxk3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk3x01234567, vk3x01234567), 8);
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi3x01234567, vxk3x01234567));
const __m128i vsignprod3x01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vprod01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod01234567, vsignprod3x01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod01234567, vsignprod3x01234567));
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int8_t)));
i4 += 8;
const __m128i vxi4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi4x01234567, vi4x01234567), 8);
const __m128i vxk4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk4x01234567, vk4x01234567), 8);
vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
const __m128i vsignprod4x01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vprod01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod01234567, vsignprod4x01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod01234567, vsignprod4x01234567));
w = (const void*) ((uintptr_t) w + 40 * sizeof(int8_t));
_mm_storeu_si128((__m128i*) (b), vacc0123);
_mm_storeu_si128((__m128i*) (b + 4), vacc4567);
b += 8;
}
assert(c == 0);
}
// Last pass to process up to 5 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
__m128i vacc0123 = _mm_loadu_si128((const __m128i*) b);
__m128i vacc4567 = _mm_loadu_si128((const __m128i*) (b + 4));
b += 8;
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 0 * sizeof(int8_t)));
i0 += 8;
const __m128i vxi0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi0x01234567, vi0x01234567), 8);
const __m128i vxk0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk0x01234567, vk0x01234567), 8);
__m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int8_t)));
i1 += 8;
const __m128i vxi1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi1x01234567, vi1x01234567), 8);
const __m128i vxk1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk1x01234567, vk1x01234567), 8);
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi1x01234567, vxk1x01234567));
const __m128i vsignprod1x01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vprod01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod01234567, vsignprod1x01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod01234567, vsignprod1x01234567));
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int8_t)));
i2 += 8;
const __m128i vxi2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi2x01234567, vi2x01234567), 8);
const __m128i vxk2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk2x01234567, vk2x01234567), 8);
vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int8_t)));
i3 += 8;
const __m128i vxi3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi3x01234567, vi3x01234567), 8);
const __m128i vxk3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk3x01234567, vk3x01234567), 8);
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi3x01234567, vxk3x01234567));
const __m128i vsignprod3x01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vprod01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod01234567, vsignprod3x01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod01234567, vsignprod3x01234567));
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int8_t)));
i4 += 8;
const __m128i vxi4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi4x01234567, vi4x01234567), 8);
const __m128i vxk4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk4x01234567, vk4x01234567), 8);
vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
const __m128i vsignprod4x01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vprod01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod01234567, vsignprod4x01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod01234567, vsignprod4x01234567));
w = (const void*) ((uintptr_t) w + 40 * sizeof(int8_t));
__m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
vscaled4567 = _mm_mul_ps(vscaled4567, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vscaled0123);
vacc4567 = _mm_cvtps_epi32(vscaled4567);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
vout01234567 = _mm_max_epi16(vout01234567, voutput_min);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
}
if XNN_UNLIKELY(c != 0) {
{
__m128i vacc0123 = _mm_loadu_si128((const __m128i*) b);
__m128i vacc4567 = _mm_loadu_si128((const __m128i*) (b + 4));
b += 8;
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 0 * sizeof(int8_t)));
const __m128i vxi0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi0x01234567, vi0x01234567), 8);
const __m128i vxk0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk0x01234567, vk0x01234567), 8);
__m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int8_t)));
const __m128i vxi1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi1x01234567, vi1x01234567), 8);
const __m128i vxk1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk1x01234567, vk1x01234567), 8);
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi1x01234567, vxk1x01234567));
const __m128i vsignprod1x01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vprod01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod01234567, vsignprod1x01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod01234567, vsignprod1x01234567));
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int8_t)));
const __m128i vxi2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi2x01234567, vi2x01234567), 8);
const __m128i vxk2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk2x01234567, vk2x01234567), 8);
vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int8_t)));
const __m128i vxi3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi3x01234567, vi3x01234567), 8);
const __m128i vxk3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk3x01234567, vk3x01234567), 8);
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi3x01234567, vxk3x01234567));
const __m128i vsignprod3x01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vprod01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod01234567, vsignprod3x01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod01234567, vsignprod3x01234567));
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int8_t)));
const __m128i vxi4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi4x01234567, vi4x01234567), 8);
const __m128i vxk4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk4x01234567, vk4x01234567), 8);
vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
const __m128i vsignprod4x01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vprod01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod01234567, vsignprod4x01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod01234567, vsignprod4x01234567));
__m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
vscaled4567 = _mm_mul_ps(vscaled4567, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vscaled0123);
vacc4567 = _mm_cvtps_epi32(vscaled4567);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
vout01234567 = _mm_max_epi16(vout01234567, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
if (c & 4) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (c & 2) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (c & 1) {
*output = (int8_t) _mm_cvtsi128_si32(vout0123456701234567);
output += 1;
}
}
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
/*
 * Dataset metadata (extraction artifact between concatenated files):
 *   file_length: 22016
 *   avg_line_length: 44.302469
 *   max_line_length: 131
 *   extension: c
 *   repo: XNNPACK
 *   file: XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-5f5m5l8c8s8r-minmax-fp32-sse2-mul16.c
 */
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-sse-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_dwconv_minmax_fp32_ukernel_5f5m5l8c8s8r__sse2_mul16(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
do {
const void* w = weights;
// First pass to process 5 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
__m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(int8_t)));
i0 += 8;
const __m128i vxi0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi0x01234567, vi0x01234567), 8);
const __m128i vxk0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk0x01234567, vk0x01234567), 8);
const __m128i vprod0x01234567lo = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
const __m128i vprod0x01234567hi = _mm_mulhi_epi16(vxi0x01234567, vxk0x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod0x01234567lo, vprod0x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod0x01234567lo, vprod0x01234567hi));
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(int8_t)));
i1 += 8;
const __m128i vxi1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi1x01234567, vi1x01234567), 8);
const __m128i vxk1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk1x01234567, vk1x01234567), 8);
const __m128i vprod1x01234567lo = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
const __m128i vprod1x01234567hi = _mm_mulhi_epi16(vxi1x01234567, vxk1x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod1x01234567lo, vprod1x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod1x01234567lo, vprod1x01234567hi));
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(int8_t)));
i2 += 8;
const __m128i vxi2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi2x01234567, vi2x01234567), 8);
const __m128i vxk2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk2x01234567, vk2x01234567), 8);
const __m128i vprod2x01234567lo = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
const __m128i vprod2x01234567hi = _mm_mulhi_epi16(vxi2x01234567, vxk2x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod2x01234567lo, vprod2x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod2x01234567lo, vprod2x01234567hi));
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(int8_t)));
i3 += 8;
const __m128i vxi3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi3x01234567, vi3x01234567), 8);
const __m128i vxk3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk3x01234567, vk3x01234567), 8);
const __m128i vprod3x01234567lo = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
const __m128i vprod3x01234567hi = _mm_mulhi_epi16(vxi3x01234567, vxk3x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod3x01234567lo, vprod3x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod3x01234567lo, vprod3x01234567hi));
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(int8_t)));
i4 += 8;
const __m128i vxi4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi4x01234567, vi4x01234567), 8);
const __m128i vxk4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk4x01234567, vk4x01234567), 8);
const __m128i vprod4x01234567lo = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
const __m128i vprod4x01234567hi = _mm_mulhi_epi16(vxi4x01234567, vxk4x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod4x01234567lo, vprod4x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod4x01234567lo, vprod4x01234567hi));
w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(int8_t));
_mm_storeu_si128((__m128i*) b, vacc0123);
_mm_storeu_si128((__m128i*) (b + 4), vacc4567);
b += 8;
}
assert(c == 0);
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
__m128i vacc0123 = _mm_loadu_si128((const __m128i*) b);
__m128i vacc4567 = _mm_loadu_si128((const __m128i*) (b + 4));
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 0 * sizeof(int8_t)));
i0 += 8;
const __m128i vxi0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi0x01234567, vi0x01234567), 8);
const __m128i vxk0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk0x01234567, vk0x01234567), 8);
const __m128i vprod0x01234567lo = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
const __m128i vprod0x01234567hi = _mm_mulhi_epi16(vxi0x01234567, vxk0x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod0x01234567lo, vprod0x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod0x01234567lo, vprod0x01234567hi));
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int8_t)));
i1 += 8;
const __m128i vxi1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi1x01234567, vi1x01234567), 8);
const __m128i vxk1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk1x01234567, vk1x01234567), 8);
const __m128i vprod1x01234567lo = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
const __m128i vprod1x01234567hi = _mm_mulhi_epi16(vxi1x01234567, vxk1x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod1x01234567lo, vprod1x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod1x01234567lo, vprod1x01234567hi));
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int8_t)));
i2 += 8;
const __m128i vxi2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi2x01234567, vi2x01234567), 8);
const __m128i vxk2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk2x01234567, vk2x01234567), 8);
const __m128i vprod2x01234567lo = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
const __m128i vprod2x01234567hi = _mm_mulhi_epi16(vxi2x01234567, vxk2x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod2x01234567lo, vprod2x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod2x01234567lo, vprod2x01234567hi));
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int8_t)));
i3 += 8;
const __m128i vxi3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi3x01234567, vi3x01234567), 8);
const __m128i vxk3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk3x01234567, vk3x01234567), 8);
const __m128i vprod3x01234567lo = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
const __m128i vprod3x01234567hi = _mm_mulhi_epi16(vxi3x01234567, vxk3x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod3x01234567lo, vprod3x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod3x01234567lo, vprod3x01234567hi));
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int8_t)));
i4 += 8;
const __m128i vxi4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi4x01234567, vi4x01234567), 8);
const __m128i vxk4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk4x01234567, vk4x01234567), 8);
const __m128i vprod4x01234567lo = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
const __m128i vprod4x01234567hi = _mm_mulhi_epi16(vxi4x01234567, vxk4x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod4x01234567lo, vprod4x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod4x01234567lo, vprod4x01234567hi));
w = (const void*) ((uintptr_t) w + 40 * sizeof(int8_t));
_mm_storeu_si128((__m128i*) (b), vacc0123);
_mm_storeu_si128((__m128i*) (b + 4), vacc4567);
b += 8;
}
assert(c == 0);
}
// Last pass to process up to 5 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
__m128i vacc0123 = _mm_loadu_si128((const __m128i*) b);
__m128i vacc4567 = _mm_loadu_si128((const __m128i*) (b + 4));
b += 8;
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 0 * sizeof(int8_t)));
i0 += 8;
const __m128i vxi0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi0x01234567, vi0x01234567), 8);
const __m128i vxk0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk0x01234567, vk0x01234567), 8);
const __m128i vprod0x01234567lo = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
const __m128i vprod0x01234567hi = _mm_mulhi_epi16(vxi0x01234567, vxk0x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod0x01234567lo, vprod0x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod0x01234567lo, vprod0x01234567hi));
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int8_t)));
i1 += 8;
const __m128i vxi1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi1x01234567, vi1x01234567), 8);
const __m128i vxk1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk1x01234567, vk1x01234567), 8);
const __m128i vprod1x01234567lo = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
const __m128i vprod1x01234567hi = _mm_mulhi_epi16(vxi1x01234567, vxk1x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod1x01234567lo, vprod1x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod1x01234567lo, vprod1x01234567hi));
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int8_t)));
i2 += 8;
const __m128i vxi2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi2x01234567, vi2x01234567), 8);
const __m128i vxk2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk2x01234567, vk2x01234567), 8);
const __m128i vprod2x01234567lo = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
const __m128i vprod2x01234567hi = _mm_mulhi_epi16(vxi2x01234567, vxk2x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod2x01234567lo, vprod2x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod2x01234567lo, vprod2x01234567hi));
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int8_t)));
i3 += 8;
const __m128i vxi3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi3x01234567, vi3x01234567), 8);
const __m128i vxk3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk3x01234567, vk3x01234567), 8);
const __m128i vprod3x01234567lo = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
const __m128i vprod3x01234567hi = _mm_mulhi_epi16(vxi3x01234567, vxk3x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod3x01234567lo, vprod3x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod3x01234567lo, vprod3x01234567hi));
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int8_t)));
i4 += 8;
const __m128i vxi4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi4x01234567, vi4x01234567), 8);
const __m128i vxk4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk4x01234567, vk4x01234567), 8);
const __m128i vprod4x01234567lo = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
const __m128i vprod4x01234567hi = _mm_mulhi_epi16(vxi4x01234567, vxk4x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod4x01234567lo, vprod4x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod4x01234567lo, vprod4x01234567hi));
w = (const void*) ((uintptr_t) w + 40 * sizeof(int8_t));
__m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
vscaled4567 = _mm_mul_ps(vscaled4567, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vscaled0123);
vacc4567 = _mm_cvtps_epi32(vscaled4567);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
vout01234567 = _mm_max_epi16(vout01234567, voutput_min);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
}
if XNN_UNLIKELY(c != 0) {
{
__m128i vacc0123 = _mm_loadu_si128((const __m128i*) b);
__m128i vacc4567 = _mm_loadu_si128((const __m128i*) (b + 4));
b += 8;
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 0 * sizeof(int8_t)));
const __m128i vxi0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi0x01234567, vi0x01234567), 8);
const __m128i vxk0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk0x01234567, vk0x01234567), 8);
const __m128i vprod0x01234567lo = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
const __m128i vprod0x01234567hi = _mm_mulhi_epi16(vxi0x01234567, vxk0x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod0x01234567lo, vprod0x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod0x01234567lo, vprod0x01234567hi));
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int8_t)));
const __m128i vxi1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi1x01234567, vi1x01234567), 8);
const __m128i vxk1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk1x01234567, vk1x01234567), 8);
const __m128i vprod1x01234567lo = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
const __m128i vprod1x01234567hi = _mm_mulhi_epi16(vxi1x01234567, vxk1x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod1x01234567lo, vprod1x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod1x01234567lo, vprod1x01234567hi));
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int8_t)));
const __m128i vxi2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi2x01234567, vi2x01234567), 8);
const __m128i vxk2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk2x01234567, vk2x01234567), 8);
const __m128i vprod2x01234567lo = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
const __m128i vprod2x01234567hi = _mm_mulhi_epi16(vxi2x01234567, vxk2x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod2x01234567lo, vprod2x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod2x01234567lo, vprod2x01234567hi));
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int8_t)));
const __m128i vxi3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi3x01234567, vi3x01234567), 8);
const __m128i vxk3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk3x01234567, vk3x01234567), 8);
const __m128i vprod3x01234567lo = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
const __m128i vprod3x01234567hi = _mm_mulhi_epi16(vxi3x01234567, vxk3x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod3x01234567lo, vprod3x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod3x01234567lo, vprod3x01234567hi));
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int8_t)));
const __m128i vxi4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi4x01234567, vi4x01234567), 8);
const __m128i vxk4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk4x01234567, vk4x01234567), 8);
const __m128i vprod4x01234567lo = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
const __m128i vprod4x01234567hi = _mm_mulhi_epi16(vxi4x01234567, vxk4x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod4x01234567lo, vprod4x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod4x01234567lo, vprod4x01234567hi));
__m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
vscaled4567 = _mm_mul_ps(vscaled4567, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vscaled0123);
vacc4567 = _mm_cvtps_epi32(vscaled4567);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
vout01234567 = _mm_max_epi16(vout01234567, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
if (c & 4) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (c & 2) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (c & 1) {
*output = (int8_t) _mm_cvtsi128_si32(vout0123456701234567);
output += 1;
}
}
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 24,406
| 46.856863
| 131
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-5f5m5l8c8s8r-minmax-fp32-sse41-mul16-add16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-sse-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QS8 (signed 8-bit) depthwise-convolution microkernel with fp32 requantization,
// specialized for SSE4.1 using 16-bit multiplies with pairwise 16-bit
// pre-accumulation ("mul16_add16").
//
// Multipass layout (5f5m5l8c8s8r): kernel taps are consumed 5 at a time —
// a first pass seeds the int32 scratch `buffer` from the packed bias and the
// first 5 taps, zero or more middle passes add 5 taps each into `buffer`, and
// a last pass adds the remaining (up to 5) taps, requantizes, and stores int8
// outputs. Channels are processed 8 at a time.
//
//   channels         - number of output channels (> 0)
//   output_width     - number of output pixels to produce (> 0)
//   input            - array of per-tap input row pointers; advanced by
//                      input_stride bytes after each output pixel
//   weights          - packed weights: per 8-channel group, 8 int32 biases
//                      followed by the int8 taps in pass order (5 taps x 8
//                      channels per pass)
//   input_offset     - byte offset added to every input pointer except `zero`
//   zero             - points at a zero vector substituted for padding taps
//   kernel_size      - total number of kernel taps; must exceed 5
//   buffer           - int32 scratch accumulator; sized for at least
//                      round_up_po2(channels, 8) elements (the passes below
//                      read/write full 8-channel groups)
//   params           - fp32_sse4 requantization parameters (scale, clamps,
//                      output zero point)
//
// XNN_OOB_READS: partial-channel tails still issue full 8-byte loads, so the
// kernel may read (but never write) a few bytes past its input buffers.
void xnn_qs8_dwconv_minmax_fp32_ukernel_5f5m5l8c8s8r__sse41_mul16_add16(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);
  assert(kernel_size > 5);  // first pass consumes 5 taps; more must remain
  do {
    const void* w = weights;
    // First pass to process 5 inputs.
    // Seeds `buffer` with bias + contribution of taps 0..4.
    {
      int32_t* b = buffer;
      // Resolve the 5 input row pointers; the shared `zero` row (used for
      // padding) must not be offset.
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      input += 5;
      // First/middle passes iterate over the channel count padded to a
      // multiple of 8; the packed weights provide matching padding.
      size_t c = round_up_po2(channels, 8);
      for (; c >= 8; c -= 8) {
        // Seed the two int32x4 accumulators with the packed per-channel bias.
        __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
        __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));
        // Tap 0: load 8 int8 inputs/weights and sign-extend to int16.
        const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
        const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
        const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(int8_t)));
        const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
        i0 += 8;
        __m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
        const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
        const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
        const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(int8_t)));
        const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
        i1 += 8;
        // "add16" trick: the products of taps 0 and 1 are summed in int16
        // before widening, halving the widening adds. NOTE(review): assumes
        // the sum of two int8*int8 products stays within int16 range per
        // XNNPACK's operand-range guarantees — confirm against the template.
        vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi1x01234567, vxk1x01234567));
        // Widen: low 4 lanes via sign-extension, high 4 lanes via
        // unpack-with-self + arithmetic shift (both sign-correct).
        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567))
;
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
        // Taps 2 and 3, same paired scheme.
        const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
        const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
        const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(int8_t)));
        const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
        i2 += 8;
        vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
        const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
        const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
        const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(int8_t)));
        const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
        i3 += 8;
        vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi3x01234567, vxk3x01234567));
        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
        // Tap 4 (odd one out): widened and accumulated on its own.
        const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
        const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
        const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(int8_t)));
        const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
        i4 += 8;
        vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
        // Advance past this group's bias (8 x int32) and taps (5 x 8 int8),
        // then spill the partial sums to the scratch buffer.
        w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(int8_t));
        _mm_storeu_si128((__m128i*) b, vacc0123);
        _mm_storeu_si128((__m128i*) (b + 4), vacc4567);
        b += 8;
      }
      assert(c == 0);  // padded channel count is a multiple of 8
    }
    // Middle pass to process 5 inputs in each iteration.
    // Runs while more than 5 taps remain; accumulates into `buffer` in place.
    for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
      int32_t* b = buffer;
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      input += 5;
      size_t c = round_up_po2(channels, 8);
      for (; c >= 8; c -= 8) {
        // Resume from the partial sums; no bias in middle-pass weights, so
        // tap offsets start at 0.
        __m128i vacc0123 = _mm_loadu_si128((const __m128i*) b);
        __m128i vacc4567 = _mm_loadu_si128((const __m128i*) (b + 4));
        const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
        const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
        const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 0 * sizeof(int8_t)));
        const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
        i0 += 8;
        __m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
        const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
        const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
        const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int8_t)));
        const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
        i1 += 8;
        // Same paired (0+1, 2+3, 4) int16 pre-accumulation as the first pass.
        vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi1x01234567, vxk1x01234567));
        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
        const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
        const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
        const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int8_t)));
        const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
        i2 += 8;
        vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
        const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
        const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
        const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int8_t)));
        const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
        i3 += 8;
        vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi3x01234567, vxk3x01234567));
        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
        const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
        const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
        const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int8_t)));
        const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
        i4 += 8;
        vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
        w = (const void*) ((uintptr_t) w + 40 * sizeof(int8_t));
        _mm_storeu_si128((__m128i*) (b), vacc0123);
        _mm_storeu_si128((__m128i*) (b + 4), vacc4567);
        b += 8;
      }
      assert(c == 0);
    }
    // Last pass to process up to 5 inputs.
    // Adds the final taps, requantizes int32 -> int8, and writes the output.
    {
      const int32_t* b = buffer;
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      // Last pass iterates over the true channel count; the remainder (< 8)
      // is handled by the tail below.
      size_t c = channels;
      for (; c >= 8; c -= 8) {
        __m128i vacc0123 = _mm_loadu_si128((const __m128i*) b);
        __m128i vacc4567 = _mm_loadu_si128((const __m128i*) (b + 4));
        b += 8;
        const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
        const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
        const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 0 * sizeof(int8_t)));
        const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
        i0 += 8;
        __m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
        const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
        const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
        const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int8_t)));
        const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
        i1 += 8;
        vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi1x01234567, vxk1x01234567));
        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
        const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
        const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
        const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int8_t)));
        const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
        i2 += 8;
        vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
        const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
        const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
        const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int8_t)));
        const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
        i3 += 8;
        vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi3x01234567, vxk3x01234567));
        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
        const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
        const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
        const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int8_t)));
        const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
        i4 += 8;
        vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
        w = (const void*) ((uintptr_t) w + 40 * sizeof(int8_t));
        // Requantize: int32 -> fp32, multiply by the requantization scale,
        // then clamp the upper bound in the float domain (so the int16 pack's
        // saturation cannot exceed output_max).
        __m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
        __m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);
        const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
        vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
        vscaled4567 = _mm_mul_ps(vscaled4567, vscale);
        const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
        vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
        vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);
        // CVTPS2DQ rounds with the current MXCSR mode (round-to-nearest-even
        // by default).
        vacc0123 = _mm_cvtps_epi32(vscaled0123);
        vacc4567 = _mm_cvtps_epi32(vscaled4567);
        // Narrow with saturation, re-add the output zero point, then apply
        // the lower clamp after the int8 pack (SSE4.1 _mm_max_epi8).
        const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
        __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
        __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
        const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse4.output_min);
        vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
        _mm_storel_epi64((__m128i*) output, vout0123456701234567);
        output += 8;
      }
      // Tail: 1..7 leftover channels. Loads are still full 8 lanes
      // (XNN_OOB_READS); only c bytes are stored.
      if XNN_UNLIKELY(c != 0) {
        {
          __m128i vacc0123 = _mm_loadu_si128((const __m128i*) b);
          __m128i vacc4567 = _mm_loadu_si128((const __m128i*) (b + 4));
          b += 8;
          const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
          const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
          const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 0 * sizeof(int8_t)));
          const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
          __m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
          const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
          const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
          const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int8_t)));
          const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
          vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi1x01234567, vxk1x01234567));
          vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
          vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
          const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
          const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
          const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int8_t)));
          const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
          vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
          const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
          const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
          const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int8_t)));
          const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
          vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi3x01234567, vxk3x01234567));
          vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
          vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
          const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
          const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
          const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int8_t)));
          const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
          vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
          vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
          vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
          // Same requantization sequence as the main loop.
          __m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
          __m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);
          const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
          vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
          vscaled4567 = _mm_mul_ps(vscaled4567, vscale);
          const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
          vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
          vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);
          vacc0123 = _mm_cvtps_epi32(vscaled0123);
          vacc4567 = _mm_cvtps_epi32(vscaled4567);
          const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
          __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
          __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
          vout0123456701234567 = _mm_max_epi8(vout0123456701234567, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
          // Store the low c bytes: 4, then 2, then 1, shifting consumed
          // lanes out of the vector between stores.
          if (c & 4) {
            unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
            vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
            output += 4;
          }
          if (c & 2) {
            unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
            vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
            output += 2;
          }
          if (c & 1) {
            *output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
            output += 1;
          }
        }
      }
    }
    // Advance to the next output pixel: step the pointer array and apply the
    // output increment (accounts for any channel remainder).
    input = (const int8_t**) ((uintptr_t) input + input_stride);
    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 19,475
| 40.088608
| 131
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-5f5m5l8c8s8r-minmax-fp32-sse41-mul16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-sse-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_dwconv_minmax_fp32_ukernel_5f5m5l8c8s8r__sse41_mul16(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
do {
const void* w = weights;
// First pass to process 5 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
__m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(int8_t)));
const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
i0 += 8;
__m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(int8_t)));
const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
i1 += 8;
vprod01234567 = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(int8_t)));
const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
i2 += 8;
vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(int8_t)));
const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
i3 += 8;
vprod01234567 = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(int8_t)));
const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
i4 += 8;
vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(int8_t));
_mm_storeu_si128((__m128i*) b, vacc0123);
_mm_storeu_si128((__m128i*) (b + 4), vacc4567);
b += 8;
}
assert(c == 0);
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
__m128i vacc0123 = _mm_loadu_si128((const __m128i*) b);
__m128i vacc4567 = _mm_loadu_si128((const __m128i*) (b + 4));
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 0 * sizeof(int8_t)));
const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
i0 += 8;
__m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int8_t)));
const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
i1 += 8;
vprod01234567 = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int8_t)));
const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
i2 += 8;
vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int8_t)));
const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
i3 += 8;
vprod01234567 = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int8_t)));
const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
i4 += 8;
vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
w = (const void*) ((uintptr_t) w + 40 * sizeof(int8_t));
_mm_storeu_si128((__m128i*) (b), vacc0123);
_mm_storeu_si128((__m128i*) (b + 4), vacc4567);
b += 8;
}
assert(c == 0);
}
// Last pass to process up to 5 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
__m128i vacc0123 = _mm_loadu_si128((const __m128i*) b);
__m128i vacc4567 = _mm_loadu_si128((const __m128i*) (b + 4));
b += 8;
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 0 * sizeof(int8_t)));
const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
i0 += 8;
__m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int8_t)));
const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
i1 += 8;
vprod01234567 = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int8_t)));
const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
i2 += 8;
vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int8_t)));
const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
i3 += 8;
vprod01234567 = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int8_t)));
const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
i4 += 8;
vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
w = (const void*) ((uintptr_t) w + 40 * sizeof(int8_t));
__m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
vscaled4567 = _mm_mul_ps(vscaled4567, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vscaled0123);
vacc4567 = _mm_cvtps_epi32(vscaled4567);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse4.output_min);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
}
if XNN_UNLIKELY(c != 0) {
{
__m128i vacc0123 = _mm_loadu_si128((const __m128i*) b);
__m128i vacc4567 = _mm_loadu_si128((const __m128i*) (b + 4));
b += 8;
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 0 * sizeof(int8_t)));
const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
__m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int8_t)));
const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
vprod01234567 = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int8_t)));
const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int8_t)));
const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
vprod01234567 = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int8_t)));
const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
__m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
vscaled4567 = _mm_mul_ps(vscaled4567, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vscaled0123);
vacc4567 = _mm_cvtps_epi32(vscaled4567);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (c & 4) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (c & 2) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (c & 1) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
output += 1;
}
}
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 20,781
| 41.412245
| 131
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-5f5m5l8c8s8r-minmax-rndnu-neon-mla8-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-neon-mul8.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
// Multi-pass QS8 depthwise-convolution microkernel, "rndnu" requantization,
// NEON variant that multiplies int8 taps pairwise with vmull_s8/vmlal_s8.
//
// Kernel taps are consumed 5 at a time ("5f5m5l"): a first pass seeds the
// int32 accumulator `buffer`, zero or more middle passes accumulate 5 more
// taps each into it, and the last pass consumes the remaining (up to 5) taps,
// requantizes, clamps, and stores int8 outputs. Channels are processed in
// tiles of 8 ("8c8s8r"); `buffer` must hold round_up_po2(channels, 8) int32s.
//
// Arguments:
//   input        - array of per-tap input-row pointers; rows equal to `zero`
//                  are the zero-padding row and are NOT offset.
//   weights      - packed weights: 8 int32 biases followed by the int8 taps
//                  for each 8-channel group (layout visible in the load order
//                  below).
//   input_offset - byte offset added to every non-`zero` input row pointer.
//   XNN_OOB_READS: the channel-remainder path below reads full 8-byte vectors
//   past `c` valid channels; callers must make such over-reads safe.
void xnn_qs8_dwconv_minmax_rndnu_ukernel_5f5m5l8c8s8r__neon_mla8_ld64(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])  XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);
  assert(kernel_size > 5);  // > 5 taps: otherwise a single-pass kernel would be used
  // Broadcast rndnu requantization constants and output clamping bounds once.
  const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
  const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
  const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
  const int8x8_t voutput_min = vld1_dup_s8(&params->rndnu_neon.output_min);
  const int8x8_t voutput_max = vld1_dup_s8(&params->rndnu_neon.output_max);
  do {
    const void* w = weights;
    // First pass to process 5 inputs.
    {
      int32_t* b = buffer;
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      input += 5;
      // First pass covers the full (rounded-up) channel count so the
      // accumulator buffer is fully initialized from the packed biases.
      size_t c = round_up_po2(channels, 8);
      for (; c >= 8; c -= 8) {
        // Seed accumulators with the per-channel bias from the weight stream.
        int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
        int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
        // Taps 0+1 multiplied into one int16x8 product, then widened to int32.
        const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
        const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
        const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
        const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        // Taps 2+3, same pairing.
        const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
        const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
        const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
        const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        // Odd tap 4 multiplied alone.
        const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
        const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        // Spill partial sums to the accumulator buffer for later passes.
        vst1q_s32(b, vacc0123); b += 4;
        vst1q_s32(b, vacc4567); b += 4;
      }
      assert(c == 0);
    }
    // Middle pass to process 5 inputs in each iteration.
    for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
      int32_t* b = buffer;
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      input += 5;
      size_t c = round_up_po2(channels, 8);
      for (; c >= 8; c -= 8) {
        // Resume accumulation from the buffer (read-modify-write).
        int32x4_t vacc0123 = vld1q_s32(b);
        int32x4_t vacc4567 = vld1q_s32(b + 4);
        const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
        const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
        const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
        const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
        const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
        const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
        const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
        const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        vst1q_s32(b, vacc0123); b += 4;
        vst1q_s32(b, vacc4567); b += 4;
      }
      assert(c == 0);
    }
    // Last pass to process up to 5 inputs.
    {
      const int32_t* b = buffer;
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      // Last pass iterates over `channels` (not the rounded-up count) so the
      // remainder branch below can store partial 4/2/1-byte tails.
      size_t c = channels;
      for (; c >= 8; c -= 8) {
        int32x4_t vacc0123 = vld1q_s32(b); b += 4;
        int32x4_t vacc4567 = vld1q_s32(b); b += 4;
        const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
        const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
        const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
        const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
        const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
        const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
        const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
        const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        // rndnu requantization: saturating pre-shift, doubling high-half
        // multiply, then rounding post-shift.
        vacc0123 = vqshlq_s32(vacc0123, vright_pre_shift);
        vacc4567 = vqshlq_s32(vacc4567, vright_pre_shift);
        vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
        vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
        vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
        vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);
#if XNN_ARCH_ARM64
        // AArch64 has SQXTN2; narrow both halves into one int16x8 directly.
        int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
        vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
        int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#else  // !XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
        vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
        int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#endif  // !XNN_ARCH_ARM64
        // Clamp to [output_min, output_max] and store 8 int8 results.
        vout01234567 = vmax_s8(vout01234567, voutput_min);
        vout01234567 = vmin_s8(vout01234567, voutput_max);
        vst1_s8(output, vout01234567); output += 8;
      }
      if XNN_UNLIKELY(c != 0) {
        {
          // Channel remainder (1..7): full 8-lane vectors are loaded
          // (over-reads allowed via XNN_OOB_READS), but only `c` lanes are
          // stored below. Input pointers are not advanced — this is the tail.
          int32x4_t vacc0123 = vld1q_s32(b); b += 4;
          int32x4_t vacc4567 = vld1q_s32(b); b += 4;
          const int8x8_t vi0x01234567 = vld1_s8(i0);
          const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
          int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
          const int8x8_t vi1x01234567 = vld1_s8(i1);
          const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
          vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
          vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
          vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
          const int8x8_t vi2x01234567 = vld1_s8(i2);
          const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
          vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
          const int8x8_t vi3x01234567 = vld1_s8(i3);
          const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
          vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
          vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
          vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
          const int8x8_t vi4x01234567 = vld1_s8(i4);
          const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
          vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
          vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
          vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
          vacc0123 = vqshlq_s32(vacc0123, vright_pre_shift);
          vacc4567 = vqshlq_s32(vacc4567, vright_pre_shift);
          vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
          vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
          vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
          vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);
#if XNN_ARCH_ARM64
          int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
          int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
          vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
          int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
          vout01234567 = vmax_s8(vout01234567, voutput_min);
          vout01234567 = vmin_s8(vout01234567, voutput_max);
          // Store the low `c` lanes: 4, then 2, then 1 byte, rotating the
          // vector down after each partial store.
          if (c & 4) {
            vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
            vout01234567 = vext_s8(vout01234567, vout01234567, 4);
          }
          if (c & 2) {
            vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
            vout01234567 = vext_s8(vout01234567, vout01234567, 2);
          }
          if (c & 1) {
            vst1_lane_s8(output, vout01234567, 0); output += 1;
          }
        }
      }
    }
    // Advance to the next output pixel's pointer row and output position.
    input = (const int8_t**) ((uintptr_t) input + input_stride);
    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 13,706
| 34.788512
| 94
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-5f5m5l8c8s8r-minmax-rndnu-neon-mul16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-neon-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
// Multi-pass QS8 depthwise-convolution microkernel, "rndnu" requantization,
// NEON "mul16" variant: int8 inputs/weights are widened to int16 with
// vmovl_s8, then multiply-accumulated straight into int32 with vmlal_s16
// (no intermediate int16 product vector, unlike the mla8 variant).
//
// Pass structure ("5f5m5l"): a first pass seeds the int32 accumulator
// `buffer` from the packed biases and 5 taps, middle passes accumulate 5
// taps each, and the last pass consumes the remaining (up to 5) taps,
// requantizes, clamps, and stores int8 outputs. Channels are processed in
// tiles of 8; `buffer` must hold round_up_po2(channels, 8) int32s.
// XNN_OOB_READS: the remainder path reads full 8-byte vectors past `c`
// valid channels.
void xnn_qs8_dwconv_minmax_rndnu_ukernel_5f5m5l8c8s8r__neon_mul16(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])  XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);
  assert(kernel_size > 5);  // > 5 taps: otherwise a single-pass kernel would be used
  // Broadcast rndnu requantization constants and output clamping bounds once.
  const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
  const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
  const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
  const int8x8_t voutput_min = vld1_dup_s8(&params->rndnu_neon.output_min);
  const int8x8_t voutput_max = vld1_dup_s8(&params->rndnu_neon.output_max);
  do {
    const void* w = weights;
    // First pass to process 5 inputs.
    {
      int32_t* b = buffer;
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      input += 5;
      // First pass covers the full (rounded-up) channel count so the
      // accumulator buffer is fully initialized from the packed biases.
      size_t c = round_up_po2(channels, 8);
      for (; c >= 8; c -= 8) {
        // Seed accumulators with the per-channel bias from the weight stream.
        int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
        int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
        // Each tap: widen input and weight to int16, then MAC into int32.
        const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
        const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
        const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
        const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
        const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
        const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
        const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
        const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
        const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
        const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
        // Spill partial sums to the accumulator buffer for later passes.
        vst1q_s32(b, vacc0123); b += 4;
        vst1q_s32(b, vacc4567); b += 4;
      }
      assert(c == 0);
    }
    // Middle pass to process 5 inputs in each iteration.
    for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
      int32_t* b = buffer;
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      input += 5;
      size_t c = round_up_po2(channels, 8);
      for (; c >= 8; c -= 8) {
        // Resume accumulation from the buffer (read-modify-write).
        int32x4_t vacc0123 = vld1q_s32(b);
        int32x4_t vacc4567 = vld1q_s32(b + 4);
        const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
        const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
        const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
        const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
        const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
        const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
        const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
        const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
        const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
        const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
        vst1q_s32(b, vacc0123); b += 4;
        vst1q_s32(b, vacc4567); b += 4;
      }
      assert(c == 0);
    }
    // Last pass to process up to 5 inputs.
    {
      const int32_t* b = buffer;
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      // Last pass iterates over `channels` (not the rounded-up count) so the
      // remainder branch below can store partial 4/2/1-byte tails.
      size_t c = channels;
      for (; c >= 8; c -= 8) {
        int32x4_t vacc0123 = vld1q_s32(b); b += 4;
        int32x4_t vacc4567 = vld1q_s32(b); b += 4;
        const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
        const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
        const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
        const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
        const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
        const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
        const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
        const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
        const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
        const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
        // rndnu requantization: saturating pre-shift, doubling high-half
        // multiply, then rounding post-shift.
        vacc0123 = vqshlq_s32(vacc0123, vright_pre_shift);
        vacc4567 = vqshlq_s32(vacc4567, vright_pre_shift);
        vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
        vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
        vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
        vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);
#if XNN_ARCH_ARM64
        // AArch64 has SQXTN2; narrow both halves into one int16x8 directly.
        int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
        vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
        int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#else  // !XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
        vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
        int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#endif  // !XNN_ARCH_ARM64
        // Clamp to [output_min, output_max] and store 8 int8 results.
        vout01234567 = vmax_s8(vout01234567, voutput_min);
        vout01234567 = vmin_s8(vout01234567, voutput_max);
        vst1_s8(output, vout01234567); output += 8;
      }
      if XNN_UNLIKELY(c != 0) {
        {
          // Channel remainder (1..7): full 8-lane vectors are loaded
          // (over-reads allowed via XNN_OOB_READS), but only `c` lanes are
          // stored below. Input pointers are not advanced — this is the tail.
          int32x4_t vacc0123 = vld1q_s32(b); b += 4;
          int32x4_t vacc4567 = vld1q_s32(b); b += 4;
          const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0));
          const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
          const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1));
          const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
          const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2));
          const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
          const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3));
          const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
          const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4));
          const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
          vacc0123 = vqshlq_s32(vacc0123, vright_pre_shift);
          vacc4567 = vqshlq_s32(vacc4567, vright_pre_shift);
          vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
          vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
          vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
          vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);
#if XNN_ARCH_ARM64
          int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
          int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
          vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
          int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
          vout01234567 = vmax_s8(vout01234567, voutput_min);
          vout01234567 = vmin_s8(vout01234567, voutput_max);
          // Store the low `c` lanes: 4, then 2, then 1 byte, rotating the
          // vector down after each partial store.
          if (c & 4) {
            vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
            vout01234567 = vext_s8(vout01234567, vout01234567, 4);
          }
          if (c & 2) {
            vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
            vout01234567 = vext_s8(vout01234567, vout01234567, 2);
          }
          if (c & 1) {
            vst1_lane_s8(output, vout01234567, 0); output += 1;
          }
        }
      }
    }
    // Advance to the next output pixel's pointer row and output position.
    input = (const int8_t**) ((uintptr_t) input + input_stride);
    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 14,943
| 40.168044
| 99
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-5f5m5l8c8s8r-minmax-rndnu-neon-mul8-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-neon-mul8.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
// QS8 depthwise-convolution microkernel, RNDNU requantization, NEON int8
// multiplies, 8 channels per iteration. "5f5m5l" multipass layout: the kernel
// taps are consumed in a first pass of 5, middle passes of 5 each, and a last
// pass of up to 5; partial int32 accumulators are parked in 'buffer' between
// passes. XNN_OOB_READS: the remainder path may read up to 8 bytes past the
// end of each row (the caller guarantees this is safe).
void xnn_qs8_dwconv_minmax_rndnu_ukernel_5f5m5l8c8s8r__neon_mul8_ld64(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);
  // The multipass layout only makes sense when there are more taps than the
  // first pass consumes.
  assert(kernel_size > 5);

  // RNDNU requantization constants, broadcast to all lanes.
  const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
  const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
  const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
  const int8x8_t voutput_min = vld1_dup_s8(&params->rndnu_neon.output_min);
  const int8x8_t voutput_max = vld1_dup_s8(&params->rndnu_neon.output_max);
  do {
    const void* w = weights;

    // First pass to process 5 inputs.
    // Seeds 'buffer' with bias + the first 5 taps. Rows equal to 'zero' are
    // the zero-padding row and are NOT offset by input_offset.
    {
      int32_t* b = buffer;
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      input += 5;

      // Weights are packed in groups of 8 channels, so iterate over the
      // channel count rounded up to 8; buffer is sized accordingly.
      size_t c = round_up_po2(channels, 8);
      for (; c >= 8; c -= 8) {
        // Bias for 8 channels.
        int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
        int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;

        // Per tap: int8 x int8 -> int16 products, widened into the int32
        // accumulators.
        const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
        const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
        const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi1x01234567, vk1x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
        const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
        const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi3x01234567, vk3x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
        const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));

        // Park the partial sums for the next pass.
        vst1q_s32(b, vacc0123); b += 4;
        vst1q_s32(b, vacc4567); b += 4;
      }
      assert(c == 0);
    }

    // Middle pass to process 5 inputs in each iteration.
    // Same as the first pass, but accumulators come from 'buffer' (no bias)
    // and weights no longer carry per-group bias words.
    for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
      int32_t* b = buffer;
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      input += 5;

      size_t c = round_up_po2(channels, 8);
      for (; c >= 8; c -= 8) {
        // Resume from the parked partial sums.
        int32x4_t vacc0123 = vld1q_s32(b);
        int32x4_t vacc4567 = vld1q_s32(b + 4);

        const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
        const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
        const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi1x01234567, vk1x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
        const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
        const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi3x01234567, vk3x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
        const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));

        vst1q_s32(b, vacc0123); b += 4;
        vst1q_s32(b, vacc4567); b += 4;
      }
      assert(c == 0);
    }

    // Last pass to process up to 5 inputs.
    // Finishes the accumulation, requantizes (pre-shift, doubling multiply,
    // post-shift), adds the output zero point, clamps, and stores int8.
    {
      const int32_t* b = buffer;
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }

      // Note: the last pass iterates over the exact channel count; the
      // remainder (c < 8) is handled by the tail block below.
      size_t c = channels;
      for (; c >= 8; c -= 8) {
        int32x4_t vacc0123 = vld1q_s32(b); b += 4;
        int32x4_t vacc4567 = vld1q_s32(b); b += 4;

        const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
        const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
        const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi1x01234567, vk1x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
        const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
        const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi3x01234567, vk3x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
        const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));

        // RNDNU requantization: saturating pre-shift, doubling high multiply,
        // rounding post-shift.
        vacc0123 = vqshlq_s32(vacc0123, vright_pre_shift);
        vacc4567 = vqshlq_s32(vacc4567, vright_pre_shift);

        vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
        vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);

        vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
        vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);

#if XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);

        vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);

        int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#else  // !XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));

        vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);

        int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#endif  // !XNN_ARCH_ARM64

        vout01234567 = vmax_s8(vout01234567, voutput_min);

        vout01234567 = vmin_s8(vout01234567, voutput_max);

        vst1_s8(output, vout01234567); output += 8;
      }
      if XNN_UNLIKELY(c != 0) {
        {
          // Remainder (1..7 channels): compute a full vector of 8 (reads may
          // go past the row end, covered by XNN_OOB_READS), store piecewise.
          int32x4_t vacc0123 = vld1q_s32(b); b += 4;
          int32x4_t vacc4567 = vld1q_s32(b); b += 4;

          const int8x8_t vi0x01234567 = vld1_s8(i0);
          const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
          int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
          vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
          vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
          const int8x8_t vi1x01234567 = vld1_s8(i1);
          const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
          vprod01234567 = vmull_s8(vi1x01234567, vk1x01234567);
          vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
          vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
          const int8x8_t vi2x01234567 = vld1_s8(i2);
          const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
          vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
          vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
          vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
          const int8x8_t vi3x01234567 = vld1_s8(i3);
          const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
          vprod01234567 = vmull_s8(vi3x01234567, vk3x01234567);
          vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
          vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
          const int8x8_t vi4x01234567 = vld1_s8(i4);
          const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
          vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
          vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
          vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));

          vacc0123 = vqshlq_s32(vacc0123, vright_pre_shift);
          vacc4567 = vqshlq_s32(vacc4567, vright_pre_shift);

          vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
          vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);

          vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
          vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);

#if XNN_ARCH_ARM64
          int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
          int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
          vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
          int8x8_t vout01234567 = vqmovn_s16(vacc01234567);

          vout01234567 = vmax_s8(vout01234567, voutput_min);
          vout01234567 = vmin_s8(vout01234567, voutput_max);

          // Store 4/2/1 lanes according to the remainder bits of c.
          if (c & 4) {
            vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
            vout01234567 = vext_s8(vout01234567, vout01234567, 4);
          }
          if (c & 2) {
            vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
            vout01234567 = vext_s8(vout01234567, vout01234567, 2);
          }
          if (c & 1) {
            vst1_lane_s8(output, vout01234567, 0); output += 1;
          }
        }
      }
    }

    // Advance to the next output pixel's row-pointer list and output slot.
    input = (const int8_t**) ((uintptr_t) input + input_stride);
    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 14,706
| 35.859649
| 94
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-6f6m7l1c1s1r-minmax-fp32-scalar-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QS8 depthwise-convolution microkernel, scalar code path, FP32 requantization
// via the "fmagic" (magic-bias float-to-int) trick, one channel at a time.
// "6f6m7l" multipass layout: taps are consumed in a first pass of 6, middle
// passes of 6 each, and a last pass of up to 7; partial int32 accumulators are
// parked in 'buffer' between passes.
void xnn_qs8_dwconv_minmax_fp32_ukernel_6f6m7l1c1s1r__scalar_fmagic(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);
  // Multipass layout requires more taps than the first pass consumes.
  assert(kernel_size > 6);

  // Requantization constants for the fmagic scheme.
  const float vscale = params->fp32_scalar_fmagic.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
  const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
  const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
  do {
    const void* w = weights;

    // First pass: seed 'buffer' with bias + taps 0-5.
    {
      int32_t* b = buffer;
      const int8_t* in[6];
      for (size_t k = 0; k < 6; k++) {
        const int8_t* row = input[k];
        assert(row != NULL);
        // Rows equal to 'zero' are the padding row and are not offset.
        if XNN_UNPREDICTABLE(row != zero) {
          row = (const int8_t*) ((uintptr_t) row + input_offset);
        }
        in[k] = row;
      }
      input += 6;

      size_t c = channels;
      do {
        // Per-channel weight group: int32 bias followed by 6 int8 taps.
        int32_t vacc = unaligned_load_s32(w);
        const int8_t* wk = (const int8_t*) ((uintptr_t) w + sizeof(int32_t));
        for (size_t k = 0; k < 6; k++) {
          vacc += (int32_t) *in[k]++ * (int32_t) wk[k];
        }
        w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 6 * sizeof(int8_t));
        *b++ = vacc;
      } while (--c != 0);
    }

    // Middle passes: accumulate 6 more taps per iteration while more than 7
    // taps remain (weights here carry no bias words).
    for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
      int32_t* b = buffer;
      const int8_t* in[6];
      for (size_t k = 0; k < 6; k++) {
        const int8_t* row = input[k];
        assert(row != NULL);
        if XNN_UNPREDICTABLE(row != zero) {
          row = (const int8_t*) ((uintptr_t) row + input_offset);
        }
        in[k] = row;
      }
      input += 6;

      size_t c = channels;
      do {
        int32_t vacc = *b;
        const int8_t* wk = (const int8_t*) w;
        for (size_t k = 0; k < 6; k++) {
          vacc += (int32_t) *in[k]++ * (int32_t) wk[k];
        }
        w = (const void*) ((uintptr_t) w + 6 * sizeof(int8_t));
        *b++ = vacc;
      } while (--c != 0);
    }

    // Last pass: final (up to) 7 taps, then fmagic requantization to int8.
    {
      const int32_t* b = buffer;
      const int8_t* in[7];
      for (size_t k = 0; k < 7; k++) {
        const int8_t* row = input[k];
        assert(row != NULL);
        if XNN_UNPREDICTABLE(row != zero) {
          row = (const int8_t*) ((uintptr_t) row + input_offset);
        }
        in[k] = row;
      }

      size_t c = channels;
      do {
        int32_t vacc = unaligned_load_s32(b++);
        const int8_t* wk = (const int8_t*) w;
        for (size_t k = 0; k < 7; k++) {
          vacc += (int32_t) *in[k]++ * (int32_t) wk[k];
        }
        w = (const void*) ((uintptr_t) w + 7 * sizeof(int8_t));

        // Scale, clamp in the float domain, then add the magic bias so the
        // integer result can be extracted from the float's bit pattern.
        float vfpacc = (float) vacc * vscale;
        vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
        vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
        vfpacc += vmagic_bias;
        const int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
        *output++ = (int8_t) vout;
      } while (--c != 0);
    }

    // Advance to the next output pixel's row-pointer list and output slot.
    input = (const int8_t**) ((uintptr_t) input + input_stride);
    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 8,619
| 33.758065
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-6f6m7l1c1s1r-minmax-fp32-scalar-imagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QS8 depthwise-convolution microkernel, scalar code path, FP32 requantization
// via the "imagic" (magic-bias with integer-domain clamping) trick, one
// channel at a time. "6f6m7l" multipass layout: taps are consumed in a first
// pass of 6, middle passes of 6 each, and a last pass of up to 7; partial
// int32 accumulators are parked in 'buffer' between passes.
void xnn_qs8_dwconv_minmax_fp32_ukernel_6f6m7l1c1s1r__scalar_imagic(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);
  // Multipass layout requires more taps than the first pass consumes.
  assert(kernel_size > 6);

  // Requantization constants for the imagic scheme.
  const float vscale = params->fp32_scalar_imagic.scale;
  const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
  const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
  const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
  const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
  do {
    const void* w = weights;

    // First pass: seed 'buffer' with bias + taps 0-5.
    {
      int32_t* b = buffer;
      const int8_t* in[6];
      for (size_t k = 0; k < 6; k++) {
        const int8_t* row = input[k];
        assert(row != NULL);
        // Rows equal to 'zero' are the padding row and are not offset.
        if XNN_UNPREDICTABLE(row != zero) {
          row = (const int8_t*) ((uintptr_t) row + input_offset);
        }
        in[k] = row;
      }
      input += 6;

      size_t c = channels;
      do {
        // Per-channel weight group: int32 bias followed by 6 int8 taps.
        int32_t vacc = unaligned_load_s32(w);
        const int8_t* wk = (const int8_t*) ((uintptr_t) w + sizeof(int32_t));
        for (size_t k = 0; k < 6; k++) {
          vacc += (int32_t) *in[k]++ * (int32_t) wk[k];
        }
        w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 6 * sizeof(int8_t));
        *b++ = vacc;
      } while (--c != 0);
    }

    // Middle passes: accumulate 6 more taps per iteration while more than 7
    // taps remain (weights here carry no bias words).
    for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
      int32_t* b = buffer;
      const int8_t* in[6];
      for (size_t k = 0; k < 6; k++) {
        const int8_t* row = input[k];
        assert(row != NULL);
        if XNN_UNPREDICTABLE(row != zero) {
          row = (const int8_t*) ((uintptr_t) row + input_offset);
        }
        in[k] = row;
      }
      input += 6;

      size_t c = channels;
      do {
        int32_t vacc = *b;
        const int8_t* wk = (const int8_t*) w;
        for (size_t k = 0; k < 6; k++) {
          vacc += (int32_t) *in[k]++ * (int32_t) wk[k];
        }
        w = (const void*) ((uintptr_t) w + 6 * sizeof(int8_t));
        *b++ = vacc;
      } while (--c != 0);
    }

    // Last pass: final (up to) 7 taps, then imagic requantization to int8.
    {
      const int32_t* b = buffer;
      const int8_t* in[7];
      for (size_t k = 0; k < 7; k++) {
        const int8_t* row = input[k];
        assert(row != NULL);
        if XNN_UNPREDICTABLE(row != zero) {
          row = (const int8_t*) ((uintptr_t) row + input_offset);
        }
        in[k] = row;
      }

      size_t c = channels;
      do {
        int32_t vacc = unaligned_load_s32(b++);
        const int8_t* wk = (const int8_t*) w;
        for (size_t k = 0; k < 7; k++) {
          vacc += (int32_t) *in[k]++ * (int32_t) wk[k];
        }
        w = (const void*) ((uintptr_t) w + 7 * sizeof(int8_t));

        // Scale and add the magic bias; the integer result is extracted from
        // the float's bit pattern and clamped in the integer domain.
        float vfpacc = (float) vacc * vscale;
        vfpacc += vmagic_bias;
        int32_t vout = (int32_t) float_as_uint32(vfpacc);
        vout = math_max_s32(vout, vmagic_min);
        vout = math_min_s32(vout, vmagic_max);
        vout -= vmagic_bias_less_zero_point;
        *output++ = (int8_t) vout;
      } while (--c != 0);
    }

    // Advance to the next output pixel's row-pointer list and output slot.
    input = (const int8_t**) ((uintptr_t) input + input_stride);
    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 8,507
| 33.168675
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-6f6m7l1c1s1r-minmax-fp32-scalar-lrintf.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_dwconv_minmax_fp32_ukernel_6f6m7l1c1s1r__scalar_lrintf(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 6);
const float vscale = params->fp32_scalar_lrintf.scale;
const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
do {
const void* w = weights;
// First pass to process 6 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = channels;
do {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[5];
vacc += vi5 * vk5;
w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 6 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
// Middle pass to process 6 inputs in each iteration.
for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = channels;
do {
int32_t vacc = *b;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = ((const int8_t*) w)[5];
vacc += vi5 * vk5;
w = (const void*) ((uintptr_t) w + 6 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
// Last pass to process up to 7 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
size_t c = channels;
do {
int32_t vacc = unaligned_load_s32(b++);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = ((const int8_t*) w)[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6++;
const int32_t vk6 = ((const int8_t*) w)[6];
vacc += vi6 * vk6;
w = (const void*) ((uintptr_t) w + 7 * sizeof(int8_t));
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
const int32_t vrndacc = (int32_t) lrintf(vfpacc);
int32_t vout = vrndacc + voutput_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 8,523
| 33.370968
| 98
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-6f6m7l1c1s1r-minmax-fp32-wasm-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_dwconv_minmax_fp32_ukernel_6f6m7l1c1s1r__wasm_fmagic(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  // Multipass QS8 depthwise convolution, 1 channel per iteration:
  //   first pass  : bias + taps 0..5, partial sums written to `buffer`
  //   middle pass : 6 taps per iteration, accumulated into `buffer`
  //   last pass   : up to 7 taps, then fp32 requantization (fmagic variant:
  //                 clamp in float space, add a magic bias, and recover the
  //                 integer result from the float's bit pattern).
  assert(channels != 0);
  assert(output_width != 0);
  assert(kernel_size > 6);

  const float vscale = params->fp32_scalar_fmagic.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
  const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
  const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
  do {
    const void* w = weights;
    // First pass to process 6 inputs.
    {
      int32_t* b = buffer;
      const int8_t* i[6];
      for (size_t k = 0; k < 6; k++) {
        i[k] = input[k];
        assert(i[k] != NULL);
        // `zero` rows are padding and must not be rebased by input_offset.
        if XNN_UNPREDICTABLE(i[k] != zero) {
          i[k] = (const int8_t*) ((uintptr_t) i[k] + input_offset);
        }
      }
      input += 6;

      size_t c = channels;
      do {
        int32_t vacc = unaligned_load_s32(w);  // per-channel bias
        const int8_t* wk = (const int8_t*) ((uintptr_t) w + sizeof(int32_t));
        for (size_t k = 0; k < 6; k++) {
          vacc += (int32_t) *i[k]++ * (int32_t) wk[k];
        }
        // Weight layout per channel: int32 bias followed by 6 int8 taps.
        w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 6 * sizeof(int8_t));
        *b++ = vacc;
      } while (--c != 0);
    }
    // Middle pass to process 6 inputs in each iteration.
    for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
      int32_t* b = buffer;
      const int8_t* i[6];
      for (size_t k = 0; k < 6; k++) {
        i[k] = input[k];
        assert(i[k] != NULL);
        if XNN_UNPREDICTABLE(i[k] != zero) {
          i[k] = (const int8_t*) ((uintptr_t) i[k] + input_offset);
        }
      }
      input += 6;

      size_t c = channels;
      do {
        int32_t vacc = *b;  // resume partial sum from the previous pass
        const int8_t* wk = (const int8_t*) w;
        for (size_t k = 0; k < 6; k++) {
          vacc += (int32_t) *i[k]++ * (int32_t) wk[k];
        }
        w = (const void*) ((uintptr_t) w + 6 * sizeof(int8_t));
        *b++ = vacc;
      } while (--c != 0);
    }
    // Last pass to process up to 7 inputs.
    {
      const int32_t* b = buffer;
      const int8_t* i[7];
      for (size_t k = 0; k < 7; k++) {
        i[k] = input[k];
        assert(i[k] != NULL);
        if XNN_UNPREDICTABLE(i[k] != zero) {
          i[k] = (const int8_t*) ((uintptr_t) i[k] + input_offset);
        }
      }

      size_t c = channels;
      do {
        int32_t vacc = unaligned_load_s32(b++);
        const int8_t* wk = (const int8_t*) w;
        for (size_t k = 0; k < 7; k++) {
          vacc += (int32_t) *i[k]++ * (int32_t) wk[k];
        }
        w = (const void*) ((uintptr_t) w + 7 * sizeof(int8_t));

        // fp32 requantization with magic-bias integer extraction.
        float vfpacc = (float) vacc * vscale;
        vfpacc = __builtin_wasm_max_f32(vfpacc, voutput_min_less_zero_point);
        vfpacc = __builtin_wasm_min_f32(vfpacc, voutput_max_less_zero_point);
        vfpacc += vmagic_bias;
        int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
        *output++ = (int8_t) vout;
      } while (--c != 0);
    }
    input = (const int8_t**) ((uintptr_t) input + input_stride);
    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 8,637
| 33.830645
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-6f6m7l2c1s1r-minmax-fp32-scalar-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_dwconv_minmax_fp32_ukernel_6f6m7l2c1s1r__scalar_fmagic(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  // Multipass QS8 depthwise convolution, 2 channels per main-loop iteration
  // (plus a 1-channel remainder):
  //   first pass  : bias + taps 0..5, partial sums written to `buffer`
  //   middle pass : 6 taps per iteration, accumulated into `buffer`
  //   last pass   : up to 7 taps, then fp32 requantization (fmagic variant:
  //                 clamp in float space, add a magic bias, and recover the
  //                 integer result from the float's bit pattern).
  assert(channels != 0);
  assert(output_width != 0);
  assert(kernel_size > 6);

  const float vscale = params->fp32_scalar_fmagic.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
  const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
  const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
  do {
    const void* w = weights;
    // First pass to process 6 inputs.
    {
      int32_t* b = buffer;
      const int8_t* i[6];
      for (size_t k = 0; k < 6; k++) {
        i[k] = input[k];
        assert(i[k] != NULL);
        // `zero` rows are padding and must not be rebased by input_offset.
        if XNN_UNPREDICTABLE(i[k] != zero) {
          i[k] = (const int8_t*) ((uintptr_t) i[k] + input_offset);
        }
      }
      input += 6;

      size_t c = channels;
      for (; c >= 2; c -= 2) {
        int32_t vacc0 = unaligned_indexed_load_s32(w, 0);  // bias, channel 0
        int32_t vacc1 = unaligned_indexed_load_s32(w, 1);  // bias, channel 1
        // Taps are interleaved per tap: { k0c0, k0c1, k1c0, k1c1, ... }.
        const int8_t* wk = (const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t));
        for (size_t k = 0; k < 6; k++) {
          vacc0 += (int32_t) i[k][0] * (int32_t) wk[2 * k];
          vacc1 += (int32_t) i[k][1] * (int32_t) wk[2 * k + 1];
          i[k] += 2;
        }
        w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 12 * sizeof(int8_t));
        b[0] = vacc0;
        b[1] = vacc1;
        b += 2;
      }
      if XNN_UNLIKELY(c != 0) {
        int32_t vacc = unaligned_load_s32(w);
        const int8_t* wk = (const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t));
        for (size_t k = 0; k < 6; k++) {
          vacc += (int32_t) *i[k] * (int32_t) wk[k];
        }
        w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 6 * sizeof(int8_t));
        *b++ = vacc;
      }
    }
    // Middle pass to process 6 inputs in each iteration.
    for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
      int32_t* b = buffer;
      const int8_t* i[6];
      for (size_t k = 0; k < 6; k++) {
        i[k] = input[k];
        assert(i[k] != NULL);
        if XNN_UNPREDICTABLE(i[k] != zero) {
          i[k] = (const int8_t*) ((uintptr_t) i[k] + input_offset);
        }
      }
      input += 6;

      size_t c = channels;
      for (; c >= 2; c -= 2) {
        int32_t vacc0 = b[0];  // resume partial sums from the previous pass
        int32_t vacc1 = b[1];
        const int8_t* wk = (const int8_t*) w;
        for (size_t k = 0; k < 6; k++) {
          vacc0 += (int32_t) i[k][0] * (int32_t) wk[2 * k];
          vacc1 += (int32_t) i[k][1] * (int32_t) wk[2 * k + 1];
          i[k] += 2;
        }
        w = (const void*) ((uintptr_t) w + 12 * sizeof(int8_t));
        b[0] = vacc0;
        b[1] = vacc1;
        b += 2;
      }
      if XNN_UNLIKELY(c != 0) {
        int32_t vacc = b[0];
        const int8_t* wk = (const int8_t*) w;
        for (size_t k = 0; k < 6; k++) {
          vacc += (int32_t) *i[k] * (int32_t) wk[k];
        }
        w = (const void*) ((uintptr_t) w + 6 * sizeof(int8_t));
        *b++ = vacc;
      }
    }
    // Last pass to process up to 7 inputs.
    {
      const int32_t* b = buffer;
      const int8_t* i[7];
      for (size_t k = 0; k < 7; k++) {
        i[k] = input[k];
        assert(i[k] != NULL);
        if XNN_UNPREDICTABLE(i[k] != zero) {
          i[k] = (const int8_t*) ((uintptr_t) i[k] + input_offset);
        }
      }

      size_t c = channels;
      for (; c >= 2; c -= 2) {
        int32_t vacc0 = b[0];
        int32_t vacc1 = b[1];
        b += 2;
        const int8_t* wk = (const int8_t*) w;
        for (size_t k = 0; k < 7; k++) {
          vacc0 += (int32_t) i[k][0] * (int32_t) wk[2 * k];
          vacc1 += (int32_t) i[k][1] * (int32_t) wk[2 * k + 1];
          i[k] += 2;
        }
        w = (const void*) ((uintptr_t) w + 14 * sizeof(int8_t));

        // fp32 requantization with magic-bias integer extraction.
        float vfpacc0 = (float) vacc0 * vscale;
        float vfpacc1 = (float) vacc1 * vscale;
        vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
        vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
        vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
        vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
        vfpacc0 += vmagic_bias;
        vfpacc1 += vmagic_bias;
        int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
        int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
        output[0] = (int8_t) vout0;
        output[1] = (int8_t) vout1;
        output += 2;
      }
      if XNN_UNLIKELY(c != 0) {
        int32_t vacc = b[0];
        const int8_t* wk = (const int8_t*) w;
        for (size_t k = 0; k < 7; k++) {
          vacc += (int32_t) *i[k] * (int32_t) wk[k];
        }
        // Final channel: no need to advance `w` further.
        float vfpacc = (float) vacc * vscale;
        vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
        vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
        vfpacc += vmagic_bias;
        int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
        *output++ = (int8_t) vout;
      }
    }
    input = (const int8_t**) ((uintptr_t) input + input_stride);
    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 16,461
| 32.942268
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-6f6m7l2c1s1r-minmax-fp32-scalar-imagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_dwconv_minmax_fp32_ukernel_6f6m7l2c1s1r__scalar_imagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 6);
const float vscale = params->fp32_scalar_imagic.scale;
const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
do {
const void* w = weights;
// First pass to process 6 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10];
const int32_t vk5x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[11];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 12 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5;
const int32_t vk5 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[5];
vacc += vi5 * vk5;
w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 6 * sizeof(int8_t));
*b++ = vacc;
}
}
// Middle pass to process 6 inputs in each iteration.
for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) ((const int8_t*) w)[10];
const int32_t vk5x1 = (int32_t) ((const int8_t*) w)[11];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
w = (const void*) ((uintptr_t) w + 12 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = b[0];
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5;
const int32_t vk5 = (int32_t) ((const int8_t*) w)[5];
vacc += vi5 * vk5;
w = (const void*) ((uintptr_t) w + 6 * sizeof(int8_t));
*b++ = vacc;
}
}
// Last pass to process up to 7 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
b += 2;
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) ((const int8_t*) w)[10];
const int32_t vk5x1 = (int32_t) ((const int8_t*) w)[11];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
const int32_t vi6x0 = (int32_t) i6[0];
const int32_t vi6x1 = (int32_t) i6[1];
i6 += 2;
const int32_t vk6x0 = (int32_t) ((const int8_t*) w)[12];
const int32_t vk6x1 = (int32_t) ((const int8_t*) w)[13];
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
w = (const void*) ((uintptr_t) w + 14 * sizeof(int8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
vfpacc0 *= vscale;
vfpacc1 *= vscale;
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0);
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1);
vout0 = math_max_s32(vout0, vmagic_min);
vout1 = math_max_s32(vout1, vmagic_min);
vout0 = math_min_s32(vout0, vmagic_max);
vout1 = math_min_s32(vout1, vmagic_max);
vout0 -= vmagic_bias_less_zero_point;
vout1 -= vmagic_bias_less_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = b[0];
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5;
const int32_t vk5 = (int32_t) ((const int8_t*) w)[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6;
const int32_t vk6 = (int32_t) ((const int8_t*) w)[6];
vacc += vi6 * vk6;
float vfpacc = (float) vacc * vscale;
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc);
vout = math_max_s32(vout, vmagic_min);
vout = math_min_s32(vout, vmagic_max);
vout -= vmagic_bias_less_zero_point;
*output++ = (int8_t) vout;
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 16,284
| 32.302658
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-6f6m7l2c1s1r-minmax-fp32-scalar-lrintf.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QS8 depthwise convolution with min/max clamping; fp32 requantization using
// lrintf() for rounding (scalar implementation).
//
// "6f6m7l2c1s1r" decomposition of an arbitrary kernel_size > 6:
//   - first pass:  kernel taps 0..5, seeds the int32 accumulator `buffer`
//                  with the per-channel bias stored at the head of `weights`;
//   - middle pass: 6 taps per iteration, accumulates into `buffer`;
//   - last pass:   the remaining (up to 7) taps, then requantizes and stores.
// Channels are processed 2 at a time with a 1-channel remainder.
//
// `input` holds kernel_size row pointers per output pixel and is advanced by
// `input_stride` bytes between pixels. A pointer equal to `zero` designates
// the shared padding row and is deliberately NOT adjusted by `input_offset`.
void xnn_qs8_dwconv_minmax_fp32_ukernel_6f6m7l2c1s1r__scalar_lrintf(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);
  assert(kernel_size > 6);

  // Requantization parameters. The output min/max bounds are pre-biased by
  // the output zero point, which is added back only after rounding.
  const float vscale = params->fp32_scalar_lrintf.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
  const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
  do {
    const void* w = weights;

    // First pass to process 6 inputs.
    {
      int32_t* b = buffer;
      // Resolve the 6 input-row pointers for this pass; the `zero` padding
      // row is shared and must not be offset.
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      const int8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      }
      input += 6;
      // Main loop: 2 channels per iteration. First-pass weight layout per
      // 2-channel group: 2 int32 biases followed by 6 taps x 2 int8 weights.
      size_t c = channels;
      for (; c >= 2; c -= 2) {
        int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
        int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
        const int32_t vi0x0 = (int32_t) i0[0];
        const int32_t vi0x1 = (int32_t) i0[1];
        i0 += 2;
        const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
        const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1];
        vacc0 += vi0x0 * vk0x0;
        vacc1 += vi0x1 * vk0x1;
        const int32_t vi1x0 = (int32_t) i1[0];
        const int32_t vi1x1 = (int32_t) i1[1];
        i1 += 2;
        const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
        const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3];
        vacc0 += vi1x0 * vk1x0;
        vacc1 += vi1x1 * vk1x1;
        const int32_t vi2x0 = (int32_t) i2[0];
        const int32_t vi2x1 = (int32_t) i2[1];
        i2 += 2;
        const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
        const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5];
        vacc0 += vi2x0 * vk2x0;
        vacc1 += vi2x1 * vk2x1;
        const int32_t vi3x0 = (int32_t) i3[0];
        const int32_t vi3x1 = (int32_t) i3[1];
        i3 += 2;
        const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6];
        const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7];
        vacc0 += vi3x0 * vk3x0;
        vacc1 += vi3x1 * vk3x1;
        const int32_t vi4x0 = (int32_t) i4[0];
        const int32_t vi4x1 = (int32_t) i4[1];
        i4 += 2;
        const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8];
        const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9];
        vacc0 += vi4x0 * vk4x0;
        vacc1 += vi4x1 * vk4x1;
        const int32_t vi5x0 = (int32_t) i5[0];
        const int32_t vi5x1 = (int32_t) i5[1];
        i5 += 2;
        const int32_t vk5x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10];
        const int32_t vk5x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[11];
        vacc0 += vi5x0 * vk5x0;
        vacc1 += vi5x1 * vk5x1;
        w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 12 * sizeof(int8_t));
        // Stash partial sums for the later passes.
        b[0] = vacc0;
        b[1] = vacc1;
        b += 2;
      }
      // Remainder: final channel when `channels` is odd.
      if XNN_UNLIKELY(c != 0) {
        int32_t vacc = unaligned_load_s32(w);
        const int32_t vi0 = (int32_t) *i0;
        const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0];
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) *i1;
        const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1];
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) *i2;
        const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2];
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) *i3;
        const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3];
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) *i4;
        const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4];
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) *i5;
        const int32_t vk5 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[5];
        vacc += vi5 * vk5;
        w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 6 * sizeof(int8_t));
        *b++ = vacc;
      }
    }

    // Middle pass to process 6 inputs in each iteration.
    // `ks` counts the taps left after the first pass; iterate while more than
    // 7 remain so the last pass is left with at most 7. Middle/last weight
    // blocks contain only int8 taps (no bias).
    for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
      int32_t* b = buffer;
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      const int8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      }
      input += 6;
      size_t c = channels;
      for (; c >= 2; c -= 2) {
        // Resume from the buffered partial sums.
        int32_t vacc0 = b[0];
        int32_t vacc1 = b[1];
        const int32_t vi0x0 = (int32_t) i0[0];
        const int32_t vi0x1 = (int32_t) i0[1];
        i0 += 2;
        const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
        const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
        vacc0 += vi0x0 * vk0x0;
        vacc1 += vi0x1 * vk0x1;
        const int32_t vi1x0 = (int32_t) i1[0];
        const int32_t vi1x1 = (int32_t) i1[1];
        i1 += 2;
        const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[2];
        const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[3];
        vacc0 += vi1x0 * vk1x0;
        vacc1 += vi1x1 * vk1x1;
        const int32_t vi2x0 = (int32_t) i2[0];
        const int32_t vi2x1 = (int32_t) i2[1];
        i2 += 2;
        const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[4];
        const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[5];
        vacc0 += vi2x0 * vk2x0;
        vacc1 += vi2x1 * vk2x1;
        const int32_t vi3x0 = (int32_t) i3[0];
        const int32_t vi3x1 = (int32_t) i3[1];
        i3 += 2;
        const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[6];
        const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[7];
        vacc0 += vi3x0 * vk3x0;
        vacc1 += vi3x1 * vk3x1;
        const int32_t vi4x0 = (int32_t) i4[0];
        const int32_t vi4x1 = (int32_t) i4[1];
        i4 += 2;
        const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[8];
        const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[9];
        vacc0 += vi4x0 * vk4x0;
        vacc1 += vi4x1 * vk4x1;
        const int32_t vi5x0 = (int32_t) i5[0];
        const int32_t vi5x1 = (int32_t) i5[1];
        i5 += 2;
        const int32_t vk5x0 = (int32_t) ((const int8_t*) w)[10];
        const int32_t vk5x1 = (int32_t) ((const int8_t*) w)[11];
        vacc0 += vi5x0 * vk5x0;
        vacc1 += vi5x1 * vk5x1;
        w = (const void*) ((uintptr_t) w + 12 * sizeof(int8_t));
        b[0] = vacc0;
        b[1] = vacc1;
        b += 2;
      }
      // Remainder: final channel when `channels` is odd.
      if XNN_UNLIKELY(c != 0) {
        int32_t vacc = b[0];
        const int32_t vi0 = (int32_t) *i0;
        const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) *i1;
        const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) *i2;
        const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) *i3;
        const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) *i4;
        const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) *i5;
        const int32_t vk5 = (int32_t) ((const int8_t*) w)[5];
        vacc += vi5 * vk5;
        w = (const void*) ((uintptr_t) w + 6 * sizeof(int8_t));
        *b++ = vacc;
      }
    }

    // Last pass to process up to 7 inputs.
    // Also performs requantization: scale to float, clamp, round with
    // lrintf() (round-to-nearest per the current FP rounding mode), then add
    // the output zero point and narrow to int8.
    {
      const int32_t* b = buffer;
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      const int8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      }
      const int8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
      }
      size_t c = channels;
      for (; c >= 2; c -= 2) {
        int32_t vacc0 = b[0];
        int32_t vacc1 = b[1];
        b += 2;
        const int32_t vi0x0 = (int32_t) i0[0];
        const int32_t vi0x1 = (int32_t) i0[1];
        i0 += 2;
        const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
        const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
        vacc0 += vi0x0 * vk0x0;
        vacc1 += vi0x1 * vk0x1;
        const int32_t vi1x0 = (int32_t) i1[0];
        const int32_t vi1x1 = (int32_t) i1[1];
        i1 += 2;
        const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[2];
        const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[3];
        vacc0 += vi1x0 * vk1x0;
        vacc1 += vi1x1 * vk1x1;
        const int32_t vi2x0 = (int32_t) i2[0];
        const int32_t vi2x1 = (int32_t) i2[1];
        i2 += 2;
        const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[4];
        const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[5];
        vacc0 += vi2x0 * vk2x0;
        vacc1 += vi2x1 * vk2x1;
        const int32_t vi3x0 = (int32_t) i3[0];
        const int32_t vi3x1 = (int32_t) i3[1];
        i3 += 2;
        const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[6];
        const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[7];
        vacc0 += vi3x0 * vk3x0;
        vacc1 += vi3x1 * vk3x1;
        const int32_t vi4x0 = (int32_t) i4[0];
        const int32_t vi4x1 = (int32_t) i4[1];
        i4 += 2;
        const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[8];
        const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[9];
        vacc0 += vi4x0 * vk4x0;
        vacc1 += vi4x1 * vk4x1;
        const int32_t vi5x0 = (int32_t) i5[0];
        const int32_t vi5x1 = (int32_t) i5[1];
        i5 += 2;
        const int32_t vk5x0 = (int32_t) ((const int8_t*) w)[10];
        const int32_t vk5x1 = (int32_t) ((const int8_t*) w)[11];
        vacc0 += vi5x0 * vk5x0;
        vacc1 += vi5x1 * vk5x1;
        const int32_t vi6x0 = (int32_t) i6[0];
        const int32_t vi6x1 = (int32_t) i6[1];
        i6 += 2;
        const int32_t vk6x0 = (int32_t) ((const int8_t*) w)[12];
        const int32_t vk6x1 = (int32_t) ((const int8_t*) w)[13];
        vacc0 += vi6x0 * vk6x0;
        vacc1 += vi6x1 * vk6x1;
        w = (const void*) ((uintptr_t) w + 14 * sizeof(int8_t));
        // Requantize: clamping happens in float space (bounds are pre-biased
        // by the zero point), so no integer clamp is needed after rounding.
        float vfpacc0 = (float) vacc0;
        float vfpacc1 = (float) vacc1;
        vfpacc0 *= vscale;
        vfpacc1 *= vscale;
        vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
        vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
        vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
        vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
        const int32_t vrndacc0 = (int32_t) lrintf(vfpacc0);
        const int32_t vrndacc1 = (int32_t) lrintf(vfpacc1);
        int32_t vout0 = (int32_t) vrndacc0 + voutput_zero_point;
        int32_t vout1 = (int32_t) vrndacc1 + voutput_zero_point;
        output[0] = (int8_t) vout0;
        output[1] = (int8_t) vout1;
        output += 2;
      }
      // Remainder: final channel when `channels` is odd.
      if XNN_UNLIKELY(c != 0) {
        int32_t vacc = b[0];
        const int32_t vi0 = (int32_t) *i0;
        const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) *i1;
        const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) *i2;
        const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) *i3;
        const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) *i4;
        const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) *i5;
        const int32_t vk5 = (int32_t) ((const int8_t*) w)[5];
        vacc += vi5 * vk5;
        const int32_t vi6 = (int32_t) *i6;
        const int32_t vk6 = (int32_t) ((const int8_t*) w)[6];
        vacc += vi6 * vk6;
        float vfpacc = (float) vacc * vscale;
        vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
        vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
        const int32_t vrndacc = (int32_t) lrintf(vfpacc);
        int32_t vout = vrndacc + voutput_zero_point;
        *output++ = (int8_t) vout;
      }
    }

    // Advance to the next output pixel: `input` is an array of row pointers,
    // stepped in bytes; `output` advances by the caller-computed increment.
    input = (const int8_t**) ((uintptr_t) input + input_stride);
    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 16,357
| 32.727835
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-6f6m7l2c1s1r-minmax-fp32-wasm-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QS8 depthwise convolution with min/max clamping; fp32 requantization using
// the "fmagic" magic-bias trick, with WebAssembly f32.min/f32.max builtins.
//
// "6f6m7l2c1s1r" decomposition of an arbitrary kernel_size > 6:
//   - first pass:  kernel taps 0..5, seeds the int32 accumulator `buffer`
//                  with the per-channel bias stored at the head of `weights`;
//   - middle pass: 6 taps per iteration, accumulates into `buffer`;
//   - last pass:   the remaining (up to 7) taps, then requantizes and stores.
// Channels are processed 2 at a time with a 1-channel remainder.
//
// `input` holds kernel_size row pointers per output pixel and is advanced by
// `input_stride` bytes between pixels. A pointer equal to `zero` designates
// the shared padding row and is deliberately NOT adjusted by `input_offset`.
void xnn_qs8_dwconv_minmax_fp32_ukernel_6f6m7l2c1s1r__wasm_fmagic(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);
  assert(kernel_size > 6);

  // Requantization parameters. Clamping bounds are pre-biased by the output
  // zero point; `vmagic_bias` / `vmagic_bias_less_output_zero_point` implement
  // float->int rounding via IEEE-754 bit manipulation (see last pass).
  const float vscale = params->fp32_scalar_fmagic.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
  const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
  const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
  do {
    const void* w = weights;

    // First pass to process 6 inputs.
    {
      int32_t* b = buffer;
      // Resolve the 6 input-row pointers for this pass; the `zero` padding
      // row is shared and must not be offset.
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      const int8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      }
      input += 6;
      // Main loop: 2 channels per iteration. First-pass weight layout per
      // 2-channel group: 2 int32 biases followed by 6 taps x 2 int8 weights.
      size_t c = channels;
      for (; c >= 2; c -= 2) {
        int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
        int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
        const int32_t vi0x0 = (int32_t) i0[0];
        const int32_t vi0x1 = (int32_t) i0[1];
        i0 += 2;
        const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
        const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1];
        vacc0 += vi0x0 * vk0x0;
        vacc1 += vi0x1 * vk0x1;
        const int32_t vi1x0 = (int32_t) i1[0];
        const int32_t vi1x1 = (int32_t) i1[1];
        i1 += 2;
        const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
        const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3];
        vacc0 += vi1x0 * vk1x0;
        vacc1 += vi1x1 * vk1x1;
        const int32_t vi2x0 = (int32_t) i2[0];
        const int32_t vi2x1 = (int32_t) i2[1];
        i2 += 2;
        const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
        const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5];
        vacc0 += vi2x0 * vk2x0;
        vacc1 += vi2x1 * vk2x1;
        const int32_t vi3x0 = (int32_t) i3[0];
        const int32_t vi3x1 = (int32_t) i3[1];
        i3 += 2;
        const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6];
        const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7];
        vacc0 += vi3x0 * vk3x0;
        vacc1 += vi3x1 * vk3x1;
        const int32_t vi4x0 = (int32_t) i4[0];
        const int32_t vi4x1 = (int32_t) i4[1];
        i4 += 2;
        const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8];
        const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9];
        vacc0 += vi4x0 * vk4x0;
        vacc1 += vi4x1 * vk4x1;
        const int32_t vi5x0 = (int32_t) i5[0];
        const int32_t vi5x1 = (int32_t) i5[1];
        i5 += 2;
        const int32_t vk5x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10];
        const int32_t vk5x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[11];
        vacc0 += vi5x0 * vk5x0;
        vacc1 += vi5x1 * vk5x1;
        w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 12 * sizeof(int8_t));
        // Stash partial sums for the later passes.
        b[0] = vacc0;
        b[1] = vacc1;
        b += 2;
      }
      // Remainder: final channel when `channels` is odd.
      if XNN_UNLIKELY(c != 0) {
        int32_t vacc = unaligned_load_s32(w);
        const int32_t vi0 = (int32_t) *i0;
        const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0];
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) *i1;
        const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1];
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) *i2;
        const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2];
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) *i3;
        const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3];
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) *i4;
        const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4];
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) *i5;
        const int32_t vk5 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[5];
        vacc += vi5 * vk5;
        w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 6 * sizeof(int8_t));
        *b++ = vacc;
      }
    }

    // Middle pass to process 6 inputs in each iteration.
    // `ks` counts the taps left after the first pass; iterate while more than
    // 7 remain so the last pass is left with at most 7. Middle/last weight
    // blocks contain only int8 taps (no bias).
    for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
      int32_t* b = buffer;
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      const int8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      }
      input += 6;
      size_t c = channels;
      for (; c >= 2; c -= 2) {
        // Resume from the buffered partial sums.
        int32_t vacc0 = b[0];
        int32_t vacc1 = b[1];
        const int32_t vi0x0 = (int32_t) i0[0];
        const int32_t vi0x1 = (int32_t) i0[1];
        i0 += 2;
        const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
        const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
        vacc0 += vi0x0 * vk0x0;
        vacc1 += vi0x1 * vk0x1;
        const int32_t vi1x0 = (int32_t) i1[0];
        const int32_t vi1x1 = (int32_t) i1[1];
        i1 += 2;
        const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[2];
        const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[3];
        vacc0 += vi1x0 * vk1x0;
        vacc1 += vi1x1 * vk1x1;
        const int32_t vi2x0 = (int32_t) i2[0];
        const int32_t vi2x1 = (int32_t) i2[1];
        i2 += 2;
        const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[4];
        const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[5];
        vacc0 += vi2x0 * vk2x0;
        vacc1 += vi2x1 * vk2x1;
        const int32_t vi3x0 = (int32_t) i3[0];
        const int32_t vi3x1 = (int32_t) i3[1];
        i3 += 2;
        const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[6];
        const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[7];
        vacc0 += vi3x0 * vk3x0;
        vacc1 += vi3x1 * vk3x1;
        const int32_t vi4x0 = (int32_t) i4[0];
        const int32_t vi4x1 = (int32_t) i4[1];
        i4 += 2;
        const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[8];
        const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[9];
        vacc0 += vi4x0 * vk4x0;
        vacc1 += vi4x1 * vk4x1;
        const int32_t vi5x0 = (int32_t) i5[0];
        const int32_t vi5x1 = (int32_t) i5[1];
        i5 += 2;
        const int32_t vk5x0 = (int32_t) ((const int8_t*) w)[10];
        const int32_t vk5x1 = (int32_t) ((const int8_t*) w)[11];
        vacc0 += vi5x0 * vk5x0;
        vacc1 += vi5x1 * vk5x1;
        w = (const void*) ((uintptr_t) w + 12 * sizeof(int8_t));
        b[0] = vacc0;
        b[1] = vacc1;
        b += 2;
      }
      // Remainder: final channel when `channels` is odd.
      if XNN_UNLIKELY(c != 0) {
        int32_t vacc = b[0];
        const int32_t vi0 = (int32_t) *i0;
        const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) *i1;
        const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) *i2;
        const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) *i3;
        const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) *i4;
        const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) *i5;
        const int32_t vk5 = (int32_t) ((const int8_t*) w)[5];
        vacc += vi5 * vk5;
        w = (const void*) ((uintptr_t) w + 6 * sizeof(int8_t));
        *b++ = vacc;
      }
    }

    // Last pass to process up to 7 inputs.
    // Also performs requantization: scale to float, clamp with the wasm
    // f32.max/f32.min builtins, then round via the magic-bias trick: adding
    // `vmagic_bias` places the rounded value in the low mantissa bits of the
    // float, so reinterpreting the bits as int32 and subtracting
    // `vmagic_bias_less_output_zero_point` yields the zero-point-adjusted
    // quantized result in one step.
    {
      const int32_t* b = buffer;
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      const int8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      }
      const int8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
      }
      size_t c = channels;
      for (; c >= 2; c -= 2) {
        int32_t vacc0 = b[0];
        int32_t vacc1 = b[1];
        b += 2;
        const int32_t vi0x0 = (int32_t) i0[0];
        const int32_t vi0x1 = (int32_t) i0[1];
        i0 += 2;
        const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
        const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
        vacc0 += vi0x0 * vk0x0;
        vacc1 += vi0x1 * vk0x1;
        const int32_t vi1x0 = (int32_t) i1[0];
        const int32_t vi1x1 = (int32_t) i1[1];
        i1 += 2;
        const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[2];
        const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[3];
        vacc0 += vi1x0 * vk1x0;
        vacc1 += vi1x1 * vk1x1;
        const int32_t vi2x0 = (int32_t) i2[0];
        const int32_t vi2x1 = (int32_t) i2[1];
        i2 += 2;
        const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[4];
        const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[5];
        vacc0 += vi2x0 * vk2x0;
        vacc1 += vi2x1 * vk2x1;
        const int32_t vi3x0 = (int32_t) i3[0];
        const int32_t vi3x1 = (int32_t) i3[1];
        i3 += 2;
        const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[6];
        const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[7];
        vacc0 += vi3x0 * vk3x0;
        vacc1 += vi3x1 * vk3x1;
        const int32_t vi4x0 = (int32_t) i4[0];
        const int32_t vi4x1 = (int32_t) i4[1];
        i4 += 2;
        const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[8];
        const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[9];
        vacc0 += vi4x0 * vk4x0;
        vacc1 += vi4x1 * vk4x1;
        const int32_t vi5x0 = (int32_t) i5[0];
        const int32_t vi5x1 = (int32_t) i5[1];
        i5 += 2;
        const int32_t vk5x0 = (int32_t) ((const int8_t*) w)[10];
        const int32_t vk5x1 = (int32_t) ((const int8_t*) w)[11];
        vacc0 += vi5x0 * vk5x0;
        vacc1 += vi5x1 * vk5x1;
        const int32_t vi6x0 = (int32_t) i6[0];
        const int32_t vi6x1 = (int32_t) i6[1];
        i6 += 2;
        const int32_t vk6x0 = (int32_t) ((const int8_t*) w)[12];
        const int32_t vk6x1 = (int32_t) ((const int8_t*) w)[13];
        vacc0 += vi6x0 * vk6x0;
        vacc1 += vi6x1 * vk6x1;
        w = (const void*) ((uintptr_t) w + 14 * sizeof(int8_t));
        float vfpacc0 = (float) vacc0;
        float vfpacc1 = (float) vacc1;
        vfpacc0 *= vscale;
        vfpacc1 *= vscale;
        vfpacc0 = __builtin_wasm_max_f32(vfpacc0, voutput_min_less_zero_point);
        vfpacc1 = __builtin_wasm_max_f32(vfpacc1, voutput_min_less_zero_point);
        vfpacc0 = __builtin_wasm_min_f32(vfpacc0, voutput_max_less_zero_point);
        vfpacc1 = __builtin_wasm_min_f32(vfpacc1, voutput_max_less_zero_point);
        // Magic-bias rounding + zero-point adjustment in one step.
        vfpacc0 += vmagic_bias;
        vfpacc1 += vmagic_bias;
        int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
        int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
        output[0] = (int8_t) vout0;
        output[1] = (int8_t) vout1;
        output += 2;
      }
      // Remainder: final channel when `channels` is odd.
      if XNN_UNLIKELY(c != 0) {
        int32_t vacc = b[0];
        const int32_t vi0 = (int32_t) *i0;
        const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) *i1;
        const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) *i2;
        const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) *i3;
        const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) *i4;
        const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) *i5;
        const int32_t vk5 = (int32_t) ((const int8_t*) w)[5];
        vacc += vi5 * vk5;
        const int32_t vi6 = (int32_t) *i6;
        const int32_t vk6 = (int32_t) ((const int8_t*) w)[6];
        vacc += vi6 * vk6;
        float vfpacc = (float) vacc * vscale;
        vfpacc = __builtin_wasm_max_f32(vfpacc, voutput_min_less_zero_point);
        vfpacc = __builtin_wasm_min_f32(vfpacc, voutput_max_less_zero_point);
        vfpacc += vmagic_bias;
        int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
        *output++ = (int8_t) vout;
      }
    }

    // Advance to the next output pixel: `input` is an array of row pointers,
    // stepped in bytes; `output` advances by the caller-computed increment.
    input = (const int8_t**) ((uintptr_t) input + input_stride);
    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 16,519
| 33.061856
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-6f6m7l4c1s1r-minmax-fp32-scalar-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// Depthwise-convolution microkernel: QS8 (signed 8-bit) input and weights,
// fp32 requantization using the "fmagic" (magic-bias float reinterpretation)
// rounding trick. Multipass layout "6f6m7l4c1s1r":
//   - first pass consumes 6 kernel taps,
//   - each middle pass consumes 6 taps,
//   - the last pass consumes up to 7 taps,
//   - channels are processed 4 at a time with a 1-channel scalar remainder.
// Partial int32 accumulators are staged per channel in `buffer` between
// passes. `input` is an array of kernel_size row pointers per output pixel;
// rows equal to `zero` are padding rows and are used as-is (no offset).
// Weight layout (packed by the XNNPACK packer): per channel group, the first
// pass reads [4 x int32 bias][24 x int8 taps]; middle/last passes read only
// int8 taps for their portion of the kernel.
void xnn_qs8_dwconv_minmax_fp32_ukernel_6f6m7l4c1s1r__scalar_fmagic(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);
  // This multipass variant only handles kernels larger than the 6-tap first
  // pass; smaller kernels are served by unipass microkernels.
  assert(kernel_size > 6);

  // Requantization constants: fp32 scale, clamp bounds pre-offset by the
  // output zero point, and the magic bias used for float->int conversion by
  // bit reinterpretation.
  const float vscale = params->fp32_scalar_fmagic.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
  const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
  const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
  do {
    const void* w = weights;

    // First pass to process 6 inputs.
    {
      int32_t* b = buffer;  // staging area for partial accumulators
      // Set up the 6 input row pointers; padding rows (== zero) are not
      // shifted by input_offset.
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      const int8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      }
      input += 6;

      size_t c = channels;
      // Main loop: 4 channels per iteration.
      for (; c >= 4; c -= 4) {
        // Accumulators start from the packed per-channel int32 bias.
        int32_t vacc0 = ((const int32_t*) w)[0];
        int32_t vacc1 = ((const int32_t*) w)[1];
        int32_t vacc2 = ((const int32_t*) w)[2];
        int32_t vacc3 = ((const int32_t*) w)[3];

        // Tap 0: int8 weights follow the 4 bias words.
        const int32_t vi0x0 = (int32_t) i0[0];
        const int32_t vi0x1 = (int32_t) i0[1];
        const int32_t vi0x2 = (int32_t) i0[2];
        const int32_t vi0x3 = (int32_t) i0[3];
        i0 += 4;
        const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[0];
        const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[1];
        const int32_t vk0x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[2];
        const int32_t vk0x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[3];
        vacc0 += vi0x0 * vk0x0;
        vacc1 += vi0x1 * vk0x1;
        vacc2 += vi0x2 * vk0x2;
        vacc3 += vi0x3 * vk0x3;

        // Tap 1.
        const int32_t vi1x0 = (int32_t) i1[0];
        const int32_t vi1x1 = (int32_t) i1[1];
        const int32_t vi1x2 = (int32_t) i1[2];
        const int32_t vi1x3 = (int32_t) i1[3];
        i1 += 4;
        const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[4];
        const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[5];
        const int32_t vk1x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[6];
        const int32_t vk1x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[7];
        vacc0 += vi1x0 * vk1x0;
        vacc1 += vi1x1 * vk1x1;
        vacc2 += vi1x2 * vk1x2;
        vacc3 += vi1x3 * vk1x3;

        // Tap 2.
        const int32_t vi2x0 = (int32_t) i2[0];
        const int32_t vi2x1 = (int32_t) i2[1];
        const int32_t vi2x2 = (int32_t) i2[2];
        const int32_t vi2x3 = (int32_t) i2[3];
        i2 += 4;
        const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[8];
        const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[9];
        const int32_t vk2x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[10];
        const int32_t vk2x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[11];
        vacc0 += vi2x0 * vk2x0;
        vacc1 += vi2x1 * vk2x1;
        vacc2 += vi2x2 * vk2x2;
        vacc3 += vi2x3 * vk2x3;

        // Tap 3.
        const int32_t vi3x0 = (int32_t) i3[0];
        const int32_t vi3x1 = (int32_t) i3[1];
        const int32_t vi3x2 = (int32_t) i3[2];
        const int32_t vi3x3 = (int32_t) i3[3];
        i3 += 4;
        const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[12];
        const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[13];
        const int32_t vk3x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[14];
        const int32_t vk3x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[15];
        vacc0 += vi3x0 * vk3x0;
        vacc1 += vi3x1 * vk3x1;
        vacc2 += vi3x2 * vk3x2;
        vacc3 += vi3x3 * vk3x3;

        // Tap 4.
        const int32_t vi4x0 = (int32_t) i4[0];
        const int32_t vi4x1 = (int32_t) i4[1];
        const int32_t vi4x2 = (int32_t) i4[2];
        const int32_t vi4x3 = (int32_t) i4[3];
        i4 += 4;
        const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[16];
        const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[17];
        const int32_t vk4x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[18];
        const int32_t vk4x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[19];
        vacc0 += vi4x0 * vk4x0;
        vacc1 += vi4x1 * vk4x1;
        vacc2 += vi4x2 * vk4x2;
        vacc3 += vi4x3 * vk4x3;

        // Tap 5.
        const int32_t vi5x0 = (int32_t) i5[0];
        const int32_t vi5x1 = (int32_t) i5[1];
        const int32_t vi5x2 = (int32_t) i5[2];
        const int32_t vi5x3 = (int32_t) i5[3];
        i5 += 4;
        const int32_t vk5x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[20];
        const int32_t vk5x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[21];
        const int32_t vk5x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[22];
        const int32_t vk5x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[23];
        vacc0 += vi5x0 * vk5x0;
        vacc1 += vi5x1 * vk5x1;
        vacc2 += vi5x2 * vk5x2;
        vacc3 += vi5x3 * vk5x3;

        // Advance past 4 bias words + 6 taps x 4 channels of int8 weights.
        w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t) + 24 * sizeof(int8_t));

        // Stash partial accumulators for the next pass.
        b[0] = vacc0;
        b[1] = vacc1;
        b[2] = vacc2;
        b[3] = vacc3;
        b += 4;
      }
      // Remainder loop: one channel at a time (c < 4).
      if XNN_UNLIKELY(c != 0) {
        do {
          int32_t vacc = *((const int32_t*) w);  // per-channel bias
          const int32_t vi0 = (int32_t) *i0++;
          const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0];
          vacc += vi0 * vk0;
          const int32_t vi1 = (int32_t) *i1++;
          const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1];
          vacc += vi1 * vk1;
          const int32_t vi2 = (int32_t) *i2++;
          const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2];
          vacc += vi2 * vk2;
          const int32_t vi3 = (int32_t) *i3++;
          const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3];
          vacc += vi3 * vk3;
          const int32_t vi4 = (int32_t) *i4++;
          const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4];
          vacc += vi4 * vk4;
          const int32_t vi5 = (int32_t) *i5++;
          const int32_t vk5 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[5];
          vacc += vi5 * vk5;
          // Advance past 1 bias word + 6 int8 taps for this channel.
          w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 6 * sizeof(int8_t));
          *b++ = vacc;
        } while (--c != 0);
      }
    }

    // Middle pass to process 6 inputs in each iteration.
    // Runs while more than 7 taps remain (the last pass takes up to 7).
    for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
      int32_t* b = buffer;
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      const int8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      }
      input += 6;

      size_t c = channels;
      for (; c >= 4; c -= 4) {
        // Resume from partial accumulators; weights here are int8 taps only
        // (no bias words in middle passes).
        int32_t vacc0 = b[0];
        int32_t vacc1 = b[1];
        int32_t vacc2 = b[2];
        int32_t vacc3 = b[3];

        const int32_t vi0x0 = (int32_t) i0[0];
        const int32_t vi0x1 = (int32_t) i0[1];
        const int32_t vi0x2 = (int32_t) i0[2];
        const int32_t vi0x3 = (int32_t) i0[3];
        i0 += 4;
        const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
        const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
        const int32_t vk0x2 = (int32_t) ((const int8_t*) w)[2];
        const int32_t vk0x3 = (int32_t) ((const int8_t*) w)[3];
        vacc0 += vi0x0 * vk0x0;
        vacc1 += vi0x1 * vk0x1;
        vacc2 += vi0x2 * vk0x2;
        vacc3 += vi0x3 * vk0x3;
        const int32_t vi1x0 = (int32_t) i1[0];
        const int32_t vi1x1 = (int32_t) i1[1];
        const int32_t vi1x2 = (int32_t) i1[2];
        const int32_t vi1x3 = (int32_t) i1[3];
        i1 += 4;
        const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[4];
        const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[5];
        const int32_t vk1x2 = (int32_t) ((const int8_t*) w)[6];
        const int32_t vk1x3 = (int32_t) ((const int8_t*) w)[7];
        vacc0 += vi1x0 * vk1x0;
        vacc1 += vi1x1 * vk1x1;
        vacc2 += vi1x2 * vk1x2;
        vacc3 += vi1x3 * vk1x3;
        const int32_t vi2x0 = (int32_t) i2[0];
        const int32_t vi2x1 = (int32_t) i2[1];
        const int32_t vi2x2 = (int32_t) i2[2];
        const int32_t vi2x3 = (int32_t) i2[3];
        i2 += 4;
        const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[8];
        const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[9];
        const int32_t vk2x2 = (int32_t) ((const int8_t*) w)[10];
        const int32_t vk2x3 = (int32_t) ((const int8_t*) w)[11];
        vacc0 += vi2x0 * vk2x0;
        vacc1 += vi2x1 * vk2x1;
        vacc2 += vi2x2 * vk2x2;
        vacc3 += vi2x3 * vk2x3;
        const int32_t vi3x0 = (int32_t) i3[0];
        const int32_t vi3x1 = (int32_t) i3[1];
        const int32_t vi3x2 = (int32_t) i3[2];
        const int32_t vi3x3 = (int32_t) i3[3];
        i3 += 4;
        const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[12];
        const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[13];
        const int32_t vk3x2 = (int32_t) ((const int8_t*) w)[14];
        const int32_t vk3x3 = (int32_t) ((const int8_t*) w)[15];
        vacc0 += vi3x0 * vk3x0;
        vacc1 += vi3x1 * vk3x1;
        vacc2 += vi3x2 * vk3x2;
        vacc3 += vi3x3 * vk3x3;
        const int32_t vi4x0 = (int32_t) i4[0];
        const int32_t vi4x1 = (int32_t) i4[1];
        const int32_t vi4x2 = (int32_t) i4[2];
        const int32_t vi4x3 = (int32_t) i4[3];
        i4 += 4;
        const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[16];
        const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[17];
        const int32_t vk4x2 = (int32_t) ((const int8_t*) w)[18];
        const int32_t vk4x3 = (int32_t) ((const int8_t*) w)[19];
        vacc0 += vi4x0 * vk4x0;
        vacc1 += vi4x1 * vk4x1;
        vacc2 += vi4x2 * vk4x2;
        vacc3 += vi4x3 * vk4x3;
        const int32_t vi5x0 = (int32_t) i5[0];
        const int32_t vi5x1 = (int32_t) i5[1];
        const int32_t vi5x2 = (int32_t) i5[2];
        const int32_t vi5x3 = (int32_t) i5[3];
        i5 += 4;
        const int32_t vk5x0 = (int32_t) ((const int8_t*) w)[20];
        const int32_t vk5x1 = (int32_t) ((const int8_t*) w)[21];
        const int32_t vk5x2 = (int32_t) ((const int8_t*) w)[22];
        const int32_t vk5x3 = (int32_t) ((const int8_t*) w)[23];
        vacc0 += vi5x0 * vk5x0;
        vacc1 += vi5x1 * vk5x1;
        vacc2 += vi5x2 * vk5x2;
        vacc3 += vi5x3 * vk5x3;

        // Advance past 6 taps x 4 channels of int8 weights.
        w = (const void*) ((uintptr_t) w + 24 * sizeof(int8_t));

        b[0] = vacc0;
        b[1] = vacc1;
        b[2] = vacc2;
        b[3] = vacc3;
        b += 4;
      }
      // Per-channel remainder (c < 4).
      if XNN_UNLIKELY(c != 0) {
        do {
          int32_t vacc = *b;
          const int32_t vi0 = (int32_t) *i0++;
          const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
          vacc += vi0 * vk0;
          const int32_t vi1 = (int32_t) *i1++;
          const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
          vacc += vi1 * vk1;
          const int32_t vi2 = (int32_t) *i2++;
          const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
          vacc += vi2 * vk2;
          const int32_t vi3 = (int32_t) *i3++;
          const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
          vacc += vi3 * vk3;
          const int32_t vi4 = (int32_t) *i4++;
          const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
          vacc += vi4 * vk4;
          const int32_t vi5 = (int32_t) *i5++;
          const int32_t vk5 = (int32_t) ((const int8_t*) w)[5];
          vacc += vi5 * vk5;
          w = (const void*) ((uintptr_t) w + 6 * sizeof(int8_t));
          *b++ = vacc;
        } while (--c != 0);
      }
    }

    // Last pass to process up to 7 inputs.
    {
      const int32_t* b = buffer;  // accumulators are only read in this pass
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      const int8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      }
      const int8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
      }

      size_t c = channels;
      for (; c >= 4; c -= 4) {
        int32_t vacc0 = b[0];
        int32_t vacc1 = b[1];
        int32_t vacc2 = b[2];
        int32_t vacc3 = b[3];
        b += 4;

        const int32_t vi0x0 = (int32_t) i0[0];
        const int32_t vi0x1 = (int32_t) i0[1];
        const int32_t vi0x2 = (int32_t) i0[2];
        const int32_t vi0x3 = (int32_t) i0[3];
        i0 += 4;
        const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
        const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
        const int32_t vk0x2 = (int32_t) ((const int8_t*) w)[2];
        const int32_t vk0x3 = (int32_t) ((const int8_t*) w)[3];
        vacc0 += vi0x0 * vk0x0;
        vacc1 += vi0x1 * vk0x1;
        vacc2 += vi0x2 * vk0x2;
        vacc3 += vi0x3 * vk0x3;
        const int32_t vi1x0 = (int32_t) i1[0];
        const int32_t vi1x1 = (int32_t) i1[1];
        const int32_t vi1x2 = (int32_t) i1[2];
        const int32_t vi1x3 = (int32_t) i1[3];
        i1 += 4;
        const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[4];
        const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[5];
        const int32_t vk1x2 = (int32_t) ((const int8_t*) w)[6];
        const int32_t vk1x3 = (int32_t) ((const int8_t*) w)[7];
        vacc0 += vi1x0 * vk1x0;
        vacc1 += vi1x1 * vk1x1;
        vacc2 += vi1x2 * vk1x2;
        vacc3 += vi1x3 * vk1x3;
        const int32_t vi2x0 = (int32_t) i2[0];
        const int32_t vi2x1 = (int32_t) i2[1];
        const int32_t vi2x2 = (int32_t) i2[2];
        const int32_t vi2x3 = (int32_t) i2[3];
        i2 += 4;
        const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[8];
        const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[9];
        const int32_t vk2x2 = (int32_t) ((const int8_t*) w)[10];
        const int32_t vk2x3 = (int32_t) ((const int8_t*) w)[11];
        vacc0 += vi2x0 * vk2x0;
        vacc1 += vi2x1 * vk2x1;
        vacc2 += vi2x2 * vk2x2;
        vacc3 += vi2x3 * vk2x3;
        const int32_t vi3x0 = (int32_t) i3[0];
        const int32_t vi3x1 = (int32_t) i3[1];
        const int32_t vi3x2 = (int32_t) i3[2];
        const int32_t vi3x3 = (int32_t) i3[3];
        i3 += 4;
        const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[12];
        const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[13];
        const int32_t vk3x2 = (int32_t) ((const int8_t*) w)[14];
        const int32_t vk3x3 = (int32_t) ((const int8_t*) w)[15];
        vacc0 += vi3x0 * vk3x0;
        vacc1 += vi3x1 * vk3x1;
        vacc2 += vi3x2 * vk3x2;
        vacc3 += vi3x3 * vk3x3;
        const int32_t vi4x0 = (int32_t) i4[0];
        const int32_t vi4x1 = (int32_t) i4[1];
        const int32_t vi4x2 = (int32_t) i4[2];
        const int32_t vi4x3 = (int32_t) i4[3];
        i4 += 4;
        const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[16];
        const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[17];
        const int32_t vk4x2 = (int32_t) ((const int8_t*) w)[18];
        const int32_t vk4x3 = (int32_t) ((const int8_t*) w)[19];
        vacc0 += vi4x0 * vk4x0;
        vacc1 += vi4x1 * vk4x1;
        vacc2 += vi4x2 * vk4x2;
        vacc3 += vi4x3 * vk4x3;
        const int32_t vi5x0 = (int32_t) i5[0];
        const int32_t vi5x1 = (int32_t) i5[1];
        const int32_t vi5x2 = (int32_t) i5[2];
        const int32_t vi5x3 = (int32_t) i5[3];
        i5 += 4;
        const int32_t vk5x0 = (int32_t) ((const int8_t*) w)[20];
        const int32_t vk5x1 = (int32_t) ((const int8_t*) w)[21];
        const int32_t vk5x2 = (int32_t) ((const int8_t*) w)[22];
        const int32_t vk5x3 = (int32_t) ((const int8_t*) w)[23];
        vacc0 += vi5x0 * vk5x0;
        vacc1 += vi5x1 * vk5x1;
        vacc2 += vi5x2 * vk5x2;
        vacc3 += vi5x3 * vk5x3;
        const int32_t vi6x0 = (int32_t) i6[0];
        const int32_t vi6x1 = (int32_t) i6[1];
        const int32_t vi6x2 = (int32_t) i6[2];
        const int32_t vi6x3 = (int32_t) i6[3];
        i6 += 4;
        const int32_t vk6x0 = (int32_t) ((const int8_t*) w)[24];
        const int32_t vk6x1 = (int32_t) ((const int8_t*) w)[25];
        const int32_t vk6x2 = (int32_t) ((const int8_t*) w)[26];
        const int32_t vk6x3 = (int32_t) ((const int8_t*) w)[27];
        vacc0 += vi6x0 * vk6x0;
        vacc1 += vi6x1 * vk6x1;
        vacc2 += vi6x2 * vk6x2;
        vacc3 += vi6x3 * vk6x3;

        // Advance past 7 taps x 4 channels of int8 weights.
        w = (const void*) ((uintptr_t) w + 28 * sizeof(int8_t));

        // Requantize: scale in fp32, clamp, then convert float->int by adding
        // the magic bias and reinterpreting the float bits as an integer.
        float vfpacc0 = (float) vacc0;
        float vfpacc1 = (float) vacc1;
        float vfpacc2 = (float) vacc2;
        float vfpacc3 = (float) vacc3;

        vfpacc0 *= vscale;
        vfpacc1 *= vscale;
        vfpacc2 *= vscale;
        vfpacc3 *= vscale;

        vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
        vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
        vfpacc2 = math_max_f32(vfpacc2, voutput_min_less_zero_point);
        vfpacc3 = math_max_f32(vfpacc3, voutput_min_less_zero_point);

        vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
        vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
        vfpacc2 = math_min_f32(vfpacc2, voutput_max_less_zero_point);
        vfpacc3 = math_min_f32(vfpacc3, voutput_max_less_zero_point);

        vfpacc0 += vmagic_bias;
        vfpacc1 += vmagic_bias;
        vfpacc2 += vmagic_bias;
        vfpacc3 += vmagic_bias;

        int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
        int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
        int32_t vout2 = (int32_t) float_as_uint32(vfpacc2) - vmagic_bias_less_output_zero_point;
        int32_t vout3 = (int32_t) float_as_uint32(vfpacc3) - vmagic_bias_less_output_zero_point;

        output[0] = (int8_t) vout0;
        output[1] = (int8_t) vout1;
        output[2] = (int8_t) vout2;
        output[3] = (int8_t) vout3;
        output += 4;
      }
      // Per-channel remainder (c < 4) with scalar requantization.
      if XNN_UNLIKELY(c != 0) {
        do {
          int32_t vacc = *b++;
          const int32_t vi0 = (int32_t) *i0++;
          const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
          vacc += vi0 * vk0;
          const int32_t vi1 = (int32_t) *i1++;
          const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
          vacc += vi1 * vk1;
          const int32_t vi2 = (int32_t) *i2++;
          const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
          vacc += vi2 * vk2;
          const int32_t vi3 = (int32_t) *i3++;
          const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
          vacc += vi3 * vk3;
          const int32_t vi4 = (int32_t) *i4++;
          const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
          vacc += vi4 * vk4;
          const int32_t vi5 = (int32_t) *i5++;
          const int32_t vk5 = (int32_t) ((const int8_t*) w)[5];
          vacc += vi5 * vk5;
          const int32_t vi6 = (int32_t) *i6++;
          const int32_t vk6 = (int32_t) ((const int8_t*) w)[6];
          vacc += vi6 * vk6;
          w = (const void*) ((uintptr_t) w + 7 * sizeof(int8_t));

          float vfpacc = (float) vacc * vscale;
          vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
          vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
          vfpacc += vmagic_bias;
          int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
          *output++ = (int8_t) vout;
        } while (--c != 0);
      }
    }

    // Advance to the next output pixel: input_stride skips the kernel_size
    // row pointers consumed above; output_increment accounts for padding
    // between output pixels.
    input = (const int8_t**) ((uintptr_t) input + input_stride);
    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 23,759
| 36.774245
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-6f6m7l4c1s1r-minmax-fp32-scalar-imagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_dwconv_minmax_fp32_ukernel_6f6m7l4c1s1r__scalar_imagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 6);
const float vscale = params->fp32_scalar_imagic.scale;
const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
do {
const void* w = weights;
// First pass to process 6 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = channels;
for (; c >= 4; c -= 4) {
int32_t vacc0 = ((const int32_t*) w)[0];
int32_t vacc1 = ((const int32_t*) w)[1];
int32_t vacc2 = ((const int32_t*) w)[2];
int32_t vacc3 = ((const int32_t*) w)[3];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[1];
const int32_t vk0x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[2];
const int32_t vk0x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[3];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
const int32_t vi1x2 = (int32_t) i1[2];
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[4];
const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[5];
const int32_t vk1x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[6];
const int32_t vk1x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[7];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
const int32_t vi2x2 = (int32_t) i2[2];
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[8];
const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[9];
const int32_t vk2x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[10];
const int32_t vk2x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[11];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
const int32_t vi3x2 = (int32_t) i3[2];
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[12];
const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[13];
const int32_t vk3x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[14];
const int32_t vk3x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[15];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
const int32_t vi4x2 = (int32_t) i4[2];
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[16];
const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[17];
const int32_t vk4x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[18];
const int32_t vk4x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[19];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
const int32_t vi5x2 = (int32_t) i5[2];
const int32_t vi5x3 = (int32_t) i5[3];
i5 += 4;
const int32_t vk5x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[20];
const int32_t vk5x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[21];
const int32_t vk5x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[22];
const int32_t vk5x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[23];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
vacc2 += vi5x2 * vk5x2;
vacc3 += vi5x3 * vk5x3;
w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t) + 24 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b[2] = vacc2;
b[3] = vacc3;
b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32_t vacc = *((const int32_t*) w);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[5];
vacc += vi5 * vk5;
w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 6 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
}
// Middle pass to process 6 inputs in each iteration.
for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = channels;
for (; c >= 4; c -= 4) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
int32_t vacc2 = b[2];
int32_t vacc3 = b[3];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
const int32_t vk0x2 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk0x3 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
const int32_t vi1x2 = (int32_t) i1[2];
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[5];
const int32_t vk1x2 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk1x3 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
const int32_t vi2x2 = (int32_t) i2[2];
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[9];
const int32_t vk2x2 = (int32_t) ((const int8_t*) w)[10];
const int32_t vk2x3 = (int32_t) ((const int8_t*) w)[11];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
const int32_t vi3x2 = (int32_t) i3[2];
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[12];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[13];
const int32_t vk3x2 = (int32_t) ((const int8_t*) w)[14];
const int32_t vk3x3 = (int32_t) ((const int8_t*) w)[15];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
const int32_t vi4x2 = (int32_t) i4[2];
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[16];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[17];
const int32_t vk4x2 = (int32_t) ((const int8_t*) w)[18];
const int32_t vk4x3 = (int32_t) ((const int8_t*) w)[19];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
const int32_t vi5x2 = (int32_t) i5[2];
const int32_t vi5x3 = (int32_t) i5[3];
i5 += 4;
const int32_t vk5x0 = (int32_t) ((const int8_t*) w)[20];
const int32_t vk5x1 = (int32_t) ((const int8_t*) w)[21];
const int32_t vk5x2 = (int32_t) ((const int8_t*) w)[22];
const int32_t vk5x3 = (int32_t) ((const int8_t*) w)[23];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
vacc2 += vi5x2 * vk5x2;
vacc3 += vi5x3 * vk5x3;
w = (const void*) ((uintptr_t) w + 24 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b[2] = vacc2;
b[3] = vacc3;
b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32_t vacc = *b;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = (int32_t) ((const int8_t*) w)[5];
vacc += vi5 * vk5;
w = (const void*) ((uintptr_t) w + 6 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
}
// Last pass to process up to 7 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
int32_t vacc2 = b[2];
int32_t vacc3 = b[3];
b += 4;
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
const int32_t vk0x2 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk0x3 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
const int32_t vi1x2 = (int32_t) i1[2];
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[5];
const int32_t vk1x2 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk1x3 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
const int32_t vi2x2 = (int32_t) i2[2];
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[9];
const int32_t vk2x2 = (int32_t) ((const int8_t*) w)[10];
const int32_t vk2x3 = (int32_t) ((const int8_t*) w)[11];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
const int32_t vi3x2 = (int32_t) i3[2];
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[12];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[13];
const int32_t vk3x2 = (int32_t) ((const int8_t*) w)[14];
const int32_t vk3x3 = (int32_t) ((const int8_t*) w)[15];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
const int32_t vi4x2 = (int32_t) i4[2];
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[16];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[17];
const int32_t vk4x2 = (int32_t) ((const int8_t*) w)[18];
const int32_t vk4x3 = (int32_t) ((const int8_t*) w)[19];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
const int32_t vi5x2 = (int32_t) i5[2];
const int32_t vi5x3 = (int32_t) i5[3];
i5 += 4;
const int32_t vk5x0 = (int32_t) ((const int8_t*) w)[20];
const int32_t vk5x1 = (int32_t) ((const int8_t*) w)[21];
const int32_t vk5x2 = (int32_t) ((const int8_t*) w)[22];
const int32_t vk5x3 = (int32_t) ((const int8_t*) w)[23];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
vacc2 += vi5x2 * vk5x2;
vacc3 += vi5x3 * vk5x3;
const int32_t vi6x0 = (int32_t) i6[0];
const int32_t vi6x1 = (int32_t) i6[1];
const int32_t vi6x2 = (int32_t) i6[2];
const int32_t vi6x3 = (int32_t) i6[3];
i6 += 4;
const int32_t vk6x0 = (int32_t) ((const int8_t*) w)[24];
const int32_t vk6x1 = (int32_t) ((const int8_t*) w)[25];
const int32_t vk6x2 = (int32_t) ((const int8_t*) w)[26];
const int32_t vk6x3 = (int32_t) ((const int8_t*) w)[27];
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
vacc2 += vi6x2 * vk6x2;
vacc3 += vi6x3 * vk6x3;
w = (const void*) ((uintptr_t) w + 28 * sizeof(int8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
float vfpacc2 = (float) vacc2;
float vfpacc3 = (float) vacc3;
vfpacc0 *= vscale;
vfpacc1 *= vscale;
vfpacc2 *= vscale;
vfpacc3 *= vscale;
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
vfpacc2 += vmagic_bias;
vfpacc3 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0);
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1);
int32_t vout2 = (int32_t) float_as_uint32(vfpacc2);
int32_t vout3 = (int32_t) float_as_uint32(vfpacc3);
vout0 = math_max_s32(vout0, vmagic_min);
vout1 = math_max_s32(vout1, vmagic_min);
vout2 = math_max_s32(vout2, vmagic_min);
vout3 = math_max_s32(vout3, vmagic_min);
vout0 = math_min_s32(vout0, vmagic_max);
vout1 = math_min_s32(vout1, vmagic_max);
vout2 = math_min_s32(vout2, vmagic_max);
vout3 = math_min_s32(vout3, vmagic_max);
vout0 -= vmagic_bias_less_zero_point;
vout1 -= vmagic_bias_less_zero_point;
vout2 -= vmagic_bias_less_zero_point;
vout3 -= vmagic_bias_less_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output[2] = (int8_t) vout2;
output[3] = (int8_t) vout3;
output += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32_t vacc = *b++;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = (int32_t) ((const int8_t*) w)[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6++;
const int32_t vk6 = (int32_t) ((const int8_t*) w)[6];
vacc += vi6 * vk6;
w = (const void*) ((uintptr_t) w + 7 * sizeof(int8_t));
float vfpacc = (float) vacc * vscale;
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc);
vout = math_max_s32(vout, vmagic_min);
vout = math_min_s32(vout, vmagic_max);
vout -= vmagic_bias_less_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 23,518
| 36.037795
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-6f6m7l4c1s1r-minmax-fp32-scalar-lrintf.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_dwconv_minmax_fp32_ukernel_6f6m7l4c1s1r__scalar_lrintf(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  // Multipass QS8 depthwise convolution, scalar path with lrintf-based
  // requantization.  Kernel taps are consumed as 6 in the first pass (which
  // also seeds the per-channel bias), 6 per middle pass, and up to 7 in the
  // last pass; int32 partial sums are carried in `buffer` between passes.
  //
  // Weight layout, as read below: each 4-channel tile of the first pass is
  // 4 x int32 bias followed by 6 taps x 4 channels of int8 weights in
  // tap-major order; middle/last-pass tiles carry taps only.  Remainder
  // channels use the same ordering with a tile width of 1.
  assert(channels != 0);
  assert(output_width != 0);
  assert(kernel_size > 6);

  const float vscale = params->fp32_scalar_lrintf.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
  const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
  do {
    const void* w = weights;
    // First pass: seed accumulators with the bias and fold in taps 0..5.
    {
      int32_t* b = buffer;
      const int8_t* i[6];
      for (size_t t = 0; t < 6; t++) {
        const int8_t* row = input[t];
        assert(row != NULL);
        if XNN_UNPREDICTABLE(row != zero) {
          // Rows equal to `zero` are padding and must not be offset.
          row = (const int8_t*) ((uintptr_t) row + input_offset);
        }
        i[t] = row;
      }
      input += 6;
      size_t c = channels;
      for (; c >= 4; c -= 4) {
        int32_t vacc[4];
        for (size_t ch = 0; ch < 4; ch++) {
          vacc[ch] = ((const int32_t*) w)[ch];
        }
        const int8_t* k = (const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t));
        for (size_t t = 0; t < 6; t++) {
          for (size_t ch = 0; ch < 4; ch++) {
            vacc[ch] += (int32_t) i[t][ch] * (int32_t) k[t * 4 + ch];
          }
          i[t] += 4;
        }
        w = (const void*) (k + 24);
        for (size_t ch = 0; ch < 4; ch++) {
          b[ch] = vacc[ch];
        }
        b += 4;
      }
      if XNN_UNLIKELY(c != 0) {
        // Remainder channels: tile width 1, one int32 bias + 6 int8 taps.
        do {
          int32_t vacc = *((const int32_t*) w);
          const int8_t* k = (const int8_t*) ((uintptr_t) w + sizeof(int32_t));
          for (size_t t = 0; t < 6; t++) {
            vacc += (int32_t) *i[t]++ * (int32_t) k[t];
          }
          w = (const void*) (k + 6);
          *b++ = vacc;
        } while (--c != 0);
      }
    }
    // Middle passes: consume 6 more taps per iteration until at most 7 remain.
    for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
      int32_t* b = buffer;
      const int8_t* i[6];
      for (size_t t = 0; t < 6; t++) {
        const int8_t* row = input[t];
        assert(row != NULL);
        if XNN_UNPREDICTABLE(row != zero) {
          row = (const int8_t*) ((uintptr_t) row + input_offset);
        }
        i[t] = row;
      }
      input += 6;
      size_t c = channels;
      for (; c >= 4; c -= 4) {
        int32_t vacc[4];
        for (size_t ch = 0; ch < 4; ch++) {
          vacc[ch] = b[ch];
        }
        const int8_t* k = (const int8_t*) w;
        for (size_t t = 0; t < 6; t++) {
          for (size_t ch = 0; ch < 4; ch++) {
            vacc[ch] += (int32_t) i[t][ch] * (int32_t) k[t * 4 + ch];
          }
          i[t] += 4;
        }
        w = (const void*) (k + 24);
        for (size_t ch = 0; ch < 4; ch++) {
          b[ch] = vacc[ch];
        }
        b += 4;
      }
      if XNN_UNLIKELY(c != 0) {
        do {
          int32_t vacc = *b;
          const int8_t* k = (const int8_t*) w;
          for (size_t t = 0; t < 6; t++) {
            vacc += (int32_t) *i[t]++ * (int32_t) k[t];
          }
          w = (const void*) (k + 6);
          *b++ = vacc;
        } while (--c != 0);
      }
    }
    // Last pass: fold in the final (up to 7) taps, requantize, and store.
    {
      const int32_t* b = buffer;
      const int8_t* i[7];
      for (size_t t = 0; t < 7; t++) {
        const int8_t* row = input[t];
        assert(row != NULL);
        if XNN_UNPREDICTABLE(row != zero) {
          row = (const int8_t*) ((uintptr_t) row + input_offset);
        }
        i[t] = row;
      }
      size_t c = channels;
      for (; c >= 4; c -= 4) {
        int32_t vacc[4];
        for (size_t ch = 0; ch < 4; ch++) {
          vacc[ch] = b[ch];
        }
        b += 4;
        const int8_t* k = (const int8_t*) w;
        for (size_t t = 0; t < 7; t++) {
          for (size_t ch = 0; ch < 4; ch++) {
            vacc[ch] += (int32_t) i[t][ch] * (int32_t) k[t * 4 + ch];
          }
          i[t] += 4;
        }
        w = (const void*) (k + 28);
        for (size_t ch = 0; ch < 4; ch++) {
          // Scale, clamp in the zero-point-shifted float domain, round with
          // lrintf (current FP rounding mode), then re-add the zero point.
          float vfpacc = (float) vacc[ch] * vscale;
          vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
          vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
          const int32_t vrndacc = (int32_t) lrintf(vfpacc);
          output[ch] = (int8_t) (vrndacc + voutput_zero_point);
        }
        output += 4;
      }
      if XNN_UNLIKELY(c != 0) {
        do {
          int32_t vacc = *b++;
          const int8_t* k = (const int8_t*) w;
          for (size_t t = 0; t < 7; t++) {
            vacc += (int32_t) *i[t]++ * (int32_t) k[t];
          }
          w = (const void*) (k + 7);
          float vfpacc = (float) vacc * vscale;
          vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
          vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
          const int32_t vrndacc = (int32_t) lrintf(vfpacc);
          *output++ = (int8_t) (vrndacc + voutput_zero_point);
        } while (--c != 0);
      }
    }
    input = (const int8_t**) ((uintptr_t) input + input_stride);
    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 23,647
| 36.596184
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-6f6m7l4c1s1r-minmax-fp32-wasm-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_dwconv_minmax_fp32_ukernel_6f6m7l4c1s1r__wasm_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 6);
const float vscale = params->fp32_scalar_fmagic.scale;
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
do {
const void* w = weights;
// First pass to process 6 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = channels;
for (; c >= 4; c -= 4) {
int32_t vacc0 = ((const int32_t*) w)[0];
int32_t vacc1 = ((const int32_t*) w)[1];
int32_t vacc2 = ((const int32_t*) w)[2];
int32_t vacc3 = ((const int32_t*) w)[3];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[1];
const int32_t vk0x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[2];
const int32_t vk0x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[3];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
const int32_t vi1x2 = (int32_t) i1[2];
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[4];
const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[5];
const int32_t vk1x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[6];
const int32_t vk1x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[7];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
const int32_t vi2x2 = (int32_t) i2[2];
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[8];
const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[9];
const int32_t vk2x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[10];
const int32_t vk2x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[11];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
const int32_t vi3x2 = (int32_t) i3[2];
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[12];
const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[13];
const int32_t vk3x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[14];
const int32_t vk3x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[15];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
const int32_t vi4x2 = (int32_t) i4[2];
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[16];
const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[17];
const int32_t vk4x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[18];
const int32_t vk4x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[19];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
const int32_t vi5x2 = (int32_t) i5[2];
const int32_t vi5x3 = (int32_t) i5[3];
i5 += 4;
const int32_t vk5x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[20];
const int32_t vk5x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[21];
const int32_t vk5x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[22];
const int32_t vk5x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[23];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
vacc2 += vi5x2 * vk5x2;
vacc3 += vi5x3 * vk5x3;
w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t) + 24 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b[2] = vacc2;
b[3] = vacc3;
b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32_t vacc = *((const int32_t*) w);
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[5];
vacc += vi5 * vk5;
w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 6 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
}
// Middle pass to process 6 inputs in each iteration.
for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = channels;
for (; c >= 4; c -= 4) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
int32_t vacc2 = b[2];
int32_t vacc3 = b[3];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
const int32_t vk0x2 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk0x3 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
const int32_t vi1x2 = (int32_t) i1[2];
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[5];
const int32_t vk1x2 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk1x3 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
const int32_t vi2x2 = (int32_t) i2[2];
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[9];
const int32_t vk2x2 = (int32_t) ((const int8_t*) w)[10];
const int32_t vk2x3 = (int32_t) ((const int8_t*) w)[11];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
const int32_t vi3x2 = (int32_t) i3[2];
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[12];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[13];
const int32_t vk3x2 = (int32_t) ((const int8_t*) w)[14];
const int32_t vk3x3 = (int32_t) ((const int8_t*) w)[15];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
const int32_t vi4x2 = (int32_t) i4[2];
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[16];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[17];
const int32_t vk4x2 = (int32_t) ((const int8_t*) w)[18];
const int32_t vk4x3 = (int32_t) ((const int8_t*) w)[19];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
const int32_t vi5x2 = (int32_t) i5[2];
const int32_t vi5x3 = (int32_t) i5[3];
i5 += 4;
const int32_t vk5x0 = (int32_t) ((const int8_t*) w)[20];
const int32_t vk5x1 = (int32_t) ((const int8_t*) w)[21];
const int32_t vk5x2 = (int32_t) ((const int8_t*) w)[22];
const int32_t vk5x3 = (int32_t) ((const int8_t*) w)[23];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
vacc2 += vi5x2 * vk5x2;
vacc3 += vi5x3 * vk5x3;
w = (const void*) ((uintptr_t) w + 24 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b[2] = vacc2;
b[3] = vacc3;
b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32_t vacc = *b;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = (int32_t) ((const int8_t*) w)[5];
vacc += vi5 * vk5;
w = (const void*) ((uintptr_t) w + 6 * sizeof(int8_t));
*b++ = vacc;
} while (--c != 0);
}
}
// Last pass to process up to 7 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
int32_t vacc2 = b[2];
int32_t vacc3 = b[3];
b += 4;
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
const int32_t vk0x2 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk0x3 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
const int32_t vi1x2 = (int32_t) i1[2];
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[5];
const int32_t vk1x2 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk1x3 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
const int32_t vi2x2 = (int32_t) i2[2];
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[9];
const int32_t vk2x2 = (int32_t) ((const int8_t*) w)[10];
const int32_t vk2x3 = (int32_t) ((const int8_t*) w)[11];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
const int32_t vi3x2 = (int32_t) i3[2];
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[12];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[13];
const int32_t vk3x2 = (int32_t) ((const int8_t*) w)[14];
const int32_t vk3x3 = (int32_t) ((const int8_t*) w)[15];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
const int32_t vi4x2 = (int32_t) i4[2];
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[16];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[17];
const int32_t vk4x2 = (int32_t) ((const int8_t*) w)[18];
const int32_t vk4x3 = (int32_t) ((const int8_t*) w)[19];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
const int32_t vi5x2 = (int32_t) i5[2];
const int32_t vi5x3 = (int32_t) i5[3];
i5 += 4;
const int32_t vk5x0 = (int32_t) ((const int8_t*) w)[20];
const int32_t vk5x1 = (int32_t) ((const int8_t*) w)[21];
const int32_t vk5x2 = (int32_t) ((const int8_t*) w)[22];
const int32_t vk5x3 = (int32_t) ((const int8_t*) w)[23];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
vacc2 += vi5x2 * vk5x2;
vacc3 += vi5x3 * vk5x3;
const int32_t vi6x0 = (int32_t) i6[0];
const int32_t vi6x1 = (int32_t) i6[1];
const int32_t vi6x2 = (int32_t) i6[2];
const int32_t vi6x3 = (int32_t) i6[3];
i6 += 4;
const int32_t vk6x0 = (int32_t) ((const int8_t*) w)[24];
const int32_t vk6x1 = (int32_t) ((const int8_t*) w)[25];
const int32_t vk6x2 = (int32_t) ((const int8_t*) w)[26];
const int32_t vk6x3 = (int32_t) ((const int8_t*) w)[27];
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
vacc2 += vi6x2 * vk6x2;
vacc3 += vi6x3 * vk6x3;
w = (const void*) ((uintptr_t) w + 28 * sizeof(int8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
float vfpacc2 = (float) vacc2;
float vfpacc3 = (float) vacc3;
vfpacc0 *= vscale;
vfpacc1 *= vscale;
vfpacc2 *= vscale;
vfpacc3 *= vscale;
vfpacc0 = __builtin_wasm_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = __builtin_wasm_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc2 = __builtin_wasm_max_f32(vfpacc2, voutput_min_less_zero_point);
vfpacc3 = __builtin_wasm_max_f32(vfpacc3, voutput_min_less_zero_point);
vfpacc0 = __builtin_wasm_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = __builtin_wasm_min_f32(vfpacc1, voutput_max_less_zero_point);
vfpacc2 = __builtin_wasm_min_f32(vfpacc2, voutput_max_less_zero_point);
vfpacc3 = __builtin_wasm_min_f32(vfpacc3, voutput_max_less_zero_point);
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
vfpacc2 += vmagic_bias;
vfpacc3 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
int32_t vout2 = (int32_t) float_as_uint32(vfpacc2) - vmagic_bias_less_output_zero_point;
int32_t vout3 = (int32_t) float_as_uint32(vfpacc3) - vmagic_bias_less_output_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output[2] = (int8_t) vout2;
output[3] = (int8_t) vout3;
output += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32_t vacc = *b++;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = (int32_t) ((const int8_t*) w)[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6++;
const int32_t vk6 = (int32_t) ((const int8_t*) w)[6];
vacc += vi6 * vk6;
w = (const void*) ((uintptr_t) w + 7 * sizeof(int8_t));
float vfpacc = (float) vacc * vscale;
vfpacc = __builtin_wasm_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = __builtin_wasm_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 23,857
| 36.930048
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-6f6m7l8c8s8r-minmax-fp32-neon-mul16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-neon-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
// QS8 depthwise convolution microkernel, multipass variant for NEON (mul16):
//   - first pass consumes 6 input rows and initializes the int32 accumulator
//     buffer from the bias stored at the start of the packed weights;
//   - middle passes each consume 6 more input rows, accumulating into buffer;
//   - last pass consumes up to 7 remaining rows, requantizes with an fp32
//     scale using the magic-bias trick, clamps, and stores int8 outputs.
// Processes 8 channels per loop iteration; the channel count is rounded up to
// a multiple of 8 for the accumulation passes (XNN_OOB_READS permits the
// resulting over-reads), so only the last pass handles a partial remainder.
void xnn_qs8_dwconv_minmax_fp32_ukernel_6f6m7l8c8s8r__neon_mul16(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);
  assert(kernel_size > 6);  // multipass kernel: more taps than the first pass handles
  // Broadcast requantization parameters.
  // NOTE(review): the source chunk had these five lines mojibake-corrupted
  // ("¶ms->" for "&params->"); restored to valid C.
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
  const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
  const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neon.output_min);
  const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neon.output_max);
  do {
    const void* w = weights;
    // First pass to process 6 inputs.
    {
      int32_t* b = buffer;
      // Rows equal to `zero` are the shared zero-padding row and must not be
      // offset; all other row pointers are shifted by input_offset.
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      const int8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      }
      input += 6;
      // Channels rounded up to 8: the packed weights/buffer are padded, so the
      // vector loop covers everything and c reaches exactly 0.
      size_t c = round_up_po2(channels, 8);
      for (; c >= 8; c -= 8) {
        // Load the per-channel bias that leads the packed weights.
        int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
        int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
        // For each tap: widen int8 inputs/weights to int16, multiply-accumulate
        // into int32 lanes (mul16 scheme).
        const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
        const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
        const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
        const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
        const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
        const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
        const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
        const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
        const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
        const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
        const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5)); i5 += 8;
        const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
        // Spill partial sums to the accumulator buffer for the next pass.
        vst1q_s32(b, vacc0123); b += 4;
        vst1q_s32(b, vacc4567); b += 4;
      }
      assert(c == 0);
    }
    // Middle pass to process 6 inputs in each iteration.
    for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
      int32_t* b = buffer;
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      const int8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      }
      input += 6;
      size_t c = round_up_po2(channels, 8);
      for (; c >= 8; c -= 8) {
        // Resume from the partial sums produced by the previous pass.
        int32x4_t vacc0123 = vld1q_s32(b);
        int32x4_t vacc4567 = vld1q_s32(b + 4);
        const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
        const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
        const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
        const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
        const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
        const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
        const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
        const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
        const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
        const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
        const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5)); i5 += 8;
        const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
        vst1q_s32(b, vacc0123); b += 4;
        vst1q_s32(b, vacc4567); b += 4;
      }
      assert(c == 0);
    }
    // Last pass to process up to 7 inputs.
    {
      const int32_t* b = buffer;
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      const int8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      }
      const int8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
      }
      // Last pass iterates over the true channel count; the remainder
      // (c in 1..7) is handled after the loop with partial stores.
      size_t c = channels;
      for (; c >= 8; c -= 8) {
        int32x4_t vacc0123 = vld1q_s32(b); b += 4;
        int32x4_t vacc4567 = vld1q_s32(b); b += 4;
        const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
        const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
        const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
        const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
        const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
        const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
        const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
        const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
        const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
        const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
        const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5)); i5 += 8;
        const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
        const int16x8_t vi6x01234567 = vmovl_s8(vld1_s8(i6)); i6 += 8;
        const int16x8_t vk6x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
        // Requantize: int32 -> float, scale, then add the magic bias and
        // reinterpret the float bits as int32; saturating-subtract the
        // (magic bias - output zero point) to recover a zero-point-adjusted
        // rounded integer.
        float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
        float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
        vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
        vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
        vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias))
        vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
        vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
        vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
        vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
  #if XNN_ARCH_ARM64
          int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
          int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
  #else  // !XNN_ARCH_ARM64
          int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
          int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
  #endif  // !XNN_ARCH_ARM64
        // Clamp to the requested output range and store 8 int8 results.
        vout01234567 = vmax_s8(vout01234567, voutput_min);
        vout01234567 = vmin_s8(vout01234567, voutput_max);
        vst1_s8(output, vout01234567); output += 8;
      }
      if XNN_UNLIKELY(c != 0) {
        {
          int32x4_t vacc0123 = vld1q_s32(b); b += 4;
          int32x4_t vacc4567 = vld1q_s32(b); b += 4;
          // Remainder: same computation on a (padded) group of 8 channels,
          // but only c valid lanes are stored below.
          const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0));
          const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
          const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1));
          const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
          const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2));
          const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
          const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3));
          const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
          const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4));
          const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
          const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5));
          const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
          const int16x8_t vi6x01234567 = vmovl_s8(vld1_s8(i6));
          const int16x8_t vk6x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
          float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
          float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
          vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
          vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
          vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
          vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
          vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
          vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
  #if XNN_ARCH_ARM64
          int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
  #else
          int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
  #endif
          int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
          vout01234567 = vmax_s8(vout01234567, voutput_min);
          vout01234567 = vmin_s8(vout01234567, voutput_max);
          // Store the valid remainder lanes (4/2/1 at a time), shifting the
          // vector after each partial store.
          if (c & 4) {
            vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
            vout01234567 = vext_s8(vout01234567, vout01234567, 4);
          }
          if (c & 2) {
            vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
            vout01234567 = vext_s8(vout01234567, vout01234567, 2);
          }
          if (c & 1) {
            vst1_lane_s8(output, vout01234567, 0); output += 1;
          }
        }
      }
    }
    // Advance to the next output pixel's row-pointer set and output location.
    input = (const int8_t**) ((uintptr_t) input + input_stride);
    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 17,835
| 41.365796
| 123
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-6f6m7l8c8s8r-minmax-fp32-neonv8-mul16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-neon-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
// Multipass QS8 depthwise-convolution microkernel (NEONv8, mul16 variant).
//
// Naming decodes the pass structure: 6f6m7l8c8s8r = first pass consumes
// 6 kernel taps, each middle pass consumes 6 taps, the last pass consumes up
// to 7 taps; channels are processed 8 at a time (8c), with an 8-channel
// channel-subtile (8s) and 8-channel channel-round (8r).  Partial int32
// accumulators are carried between passes through the caller-provided
// `buffer`.  Requantization is fp32-based: scale in float, then
// round-to-nearest-even via vcvtnq_s32_f32 (the NEONv8 instruction that
// distinguishes this variant from the magic-bias NEON v7 path).
//
// Arguments:
//   channels         - number of output channels (> 0)
//   output_width     - number of output pixels to produce (> 0)
//   input            - indirection buffer of per-tap input row pointers
//   weights          - packed weights: 8 int32 biases then 8 int8 taps per
//                      channel group, repeated per pass
//   output           - int8 output pointer
//   input_stride     - bytes to advance the indirection buffer per pixel
//   output_increment - bytes to advance `output` after each pixel
//   input_offset     - byte offset applied to every non-`zero` input pointer
//   zero             - pointer to the zero vector (used for padding taps)
//   kernel_size      - total number of taps; must exceed 6 (first pass size)
//   buffer           - int32 scratch for inter-pass accumulators
//   params           - fp32_neonv8 requantization parameters
void xnn_qs8_dwconv_minmax_fp32_ukernel_6f6m7l8c8s8r__neonv8_mul16(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);
  assert(kernel_size > 6);  // must need at least one tap beyond the first pass

  // Broadcast requantization parameters once per call.
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
  const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neonv8.output_min);
  const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neonv8.output_max);
  do {
    const void* w = weights;

    // First pass to process 6 inputs.
    // Initializes the accumulators from the packed biases and stores the
    // int32 partial sums into `buffer` for the later passes.
    {
      int32_t* b = buffer;
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      // Padding taps point at `zero` and must not be offset.
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      const int8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      }
      input += 6;

      // Channels are padded to a multiple of 8 in the packed weights and in
      // `buffer`, so the first/middle passes always run full 8-wide
      // iterations (partial channels are only resolved in the last pass).
      size_t c = round_up_po2(channels, 8);
      for (; c >= 8; c -= 8) {
        // Load 8 int32 biases.
        int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
        int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;

        // Tap 0: widen int8 -> int16, then widening multiply-accumulate.
        const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
        const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));

        const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
        const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));

        const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
        const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));

        const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
        const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));

        const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
        const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));

        const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5)); i5 += 8;
        const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));

        // Spill partial sums to the inter-pass buffer.
        vst1q_s32(b, vacc0123); b += 4;
        vst1q_s32(b, vacc4567); b += 4;
      }
      assert(c == 0);
    }

    // Middle pass to process 6 inputs in each iteration.
    // Runs while more than 7 taps remain so the last pass always has
    // between 1 and 7 taps to finish.
    for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
      int32_t* b = buffer;
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      const int8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      }
      input += 6;

      size_t c = round_up_po2(channels, 8);
      for (; c >= 8; c -= 8) {
        // Resume the accumulators saved by the previous pass.
        int32x4_t vacc0123 = vld1q_s32(b);
        int32x4_t vacc4567 = vld1q_s32(b + 4);

        const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
        const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));

        const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
        const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));

        const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
        const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));

        const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
        const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));

        const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
        const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));

        const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5)); i5 += 8;
        const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));

        // Write the updated partial sums back in place.
        vst1q_s32(b, vacc0123); b += 4;
        vst1q_s32(b, vacc4567); b += 4;
      }
      assert(c == 0);
    }

    // Last pass to process up to 7 inputs.
    // Finishes accumulation, requantizes, clamps, and stores int8 output.
    {
      const int32_t* b = buffer;
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      const int8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      }
      const int8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
      }

      // Unlike the earlier passes, iterate over the true channel count and
      // handle the remainder below.
      size_t c = channels;
      for (; c >= 8; c -= 8) {
        int32x4_t vacc0123 = vld1q_s32(b); b += 4;
        int32x4_t vacc4567 = vld1q_s32(b); b += 4;

        const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
        const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));

        const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
        const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));

        const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
        const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));

        const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
        const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));

        const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
        const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));

        const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5)); i5 += 8;
        const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));

        const int16x8_t vi6x01234567 = vmovl_s8(vld1_s8(i6)); i6 += 8;
        const int16x8_t vk6x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));

        // Requantize: int32 -> float, scale, round-to-nearest-even -> int32.
        float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
        float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);

        vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
        vfpacc4567 = vmulq_f32(vfpacc4567, vscale);

        vacc0123 = vcvtnq_s32_f32(vfpacc0123);
        vacc4567 = vcvtnq_s32_f32(vfpacc4567);

#if XNN_ARCH_ARM64
        // AArch64 has SQXTN2, so narrow into one register in two steps.
        int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);

        vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);

        int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#else  // !XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));

        vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);

        int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#endif  // !XNN_ARCH_ARM64

        // Clamp to the output activation range.
        vout01234567 = vmax_s8(vout01234567, voutput_min);
        vout01234567 = vmin_s8(vout01234567, voutput_max);

        vst1_s8(output, vout01234567); output += 8;
      }
      if XNN_UNLIKELY(c != 0) {
        // Remainder of 1-7 channels: compute a full 8-wide result (reads past
        // `c` are covered by XNN_OOB_READS and the padded weights/buffer),
        // then store only the valid low lanes.
        {
          int32x4_t vacc0123 = vld1q_s32(b); b += 4;
          int32x4_t vacc4567 = vld1q_s32(b); b += 4;

          const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0));
          const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));

          const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1));
          const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));

          const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2));
          const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));

          const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3));
          const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));

          const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4));
          const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));

          const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5));
          const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));

          const int16x8_t vi6x01234567 = vmovl_s8(vld1_s8(i6));
          const int16x8_t vk6x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));

          float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
          float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);

          vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
          vfpacc4567 = vmulq_f32(vfpacc4567, vscale);

          vacc0123 = vcvtnq_s32_f32(vfpacc0123);
          vacc4567 = vcvtnq_s32_f32(vfpacc4567);

#if XNN_ARCH_ARM64
          int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
          int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
          vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);

          int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
          vout01234567 = vmax_s8(vout01234567, voutput_min);
          vout01234567 = vmin_s8(vout01234567, voutput_max);

          // Store 4, then 2, then 1 byte(s); rotate the vector so the next
          // valid lanes move to position 0 after each partial store.
          if (c & 4) {
            vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
            vout01234567 = vext_s8(vout01234567, vout01234567, 4);
          }
          if (c & 2) {
            vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
            vout01234567 = vext_s8(vout01234567, vout01234567, 2);
          }
          if (c & 1) {
            vst1_lane_s8(output, vout01234567, 0); output += 1;
          }
        }
      }
    }

    // Advance the indirection buffer to the next output pixel and the output
    // pointer by the caller-computed increment.
    input = (const int8_t**) ((uintptr_t) input + input_stride);
    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 17,545
| 40.976077
| 99
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-6f6m7l8c8s8r-minmax-fp32-sse41-mul16-add16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-sse-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_dwconv_minmax_fp32_ukernel_6f6m7l8c8s8r__sse41_mul16_add16(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 6);
do {
const void* w = weights;
// First pass to process 6 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
__m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(int8_t)));
const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
i0 += 8;
__m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(int8_t)));
const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
i1 += 8;
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi1x01234567, vxk1x01234567));
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(int8_t)));
const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
i2 += 8;
vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(int8_t)));
const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
i3 += 8;
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi3x01234567, vxk3x01234567));
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(int8_t)));
const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
i4 += 8;
vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(vi5x01234567);
const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(int8_t)));
const __m128i vxk5x01234567 = _mm_cvtepi8_epi16(vk5x01234567);
i5 += 8;
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi5x01234567, vxk5x01234567));
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 48 * sizeof(int8_t));
_mm_storeu_si128((__m128i*) b, vacc0123);
_mm_storeu_si128((__m128i*) (b + 4), vacc4567);
b += 8;
}
assert(c == 0);
}
// Middle pass to process 6 inputs in each iteration.
for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
__m128i vacc0123 = _mm_loadu_si128((const __m128i*) b);
__m128i vacc4567 = _mm_loadu_si128((const __m128i*) (b + 4));
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 0 * sizeof(int8_t)));
const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
i0 += 8;
__m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int8_t)));
const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
i1 += 8;
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi1x01234567, vxk1x01234567));
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int8_t)));
const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
i2 += 8;
vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int8_t)));
const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
i3 += 8;
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi3x01234567, vxk3x01234567));
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int8_t)));
const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
i4 += 8;
vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(vi5x01234567);
const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 40 * sizeof(int8_t)));
const __m128i vxk5x01234567 = _mm_cvtepi8_epi16(vk5x01234567);
i5 += 8;
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi5x01234567, vxk5x01234567));
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
w = (const void*) ((uintptr_t) w + 48 * sizeof(int8_t));
_mm_storeu_si128((__m128i*) (b), vacc0123);
_mm_storeu_si128((__m128i*) (b + 4), vacc4567);
b += 8;
}
assert(c == 0);
}
// Last pass to process up to 7 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
__m128i vacc0123 = _mm_loadu_si128((const __m128i*) b);
__m128i vacc4567 = _mm_loadu_si128((const __m128i*) (b + 4));
b += 8;
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 0 * sizeof(int8_t)));
const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
i0 += 8;
__m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int8_t)));
const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
i1 += 8;
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi1x01234567, vxk1x01234567));
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int8_t)));
const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
i2 += 8;
vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int8_t)));
const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
i3 += 8;
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi3x01234567, vxk3x01234567));
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int8_t)));
const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
i4 += 8;
vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(vi5x01234567);
const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 40 * sizeof(int8_t)));
const __m128i vxk5x01234567 = _mm_cvtepi8_epi16(vk5x01234567);
i5 += 8;
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi5x01234567, vxk5x01234567));
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(vi6x01234567);
const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 48 * sizeof(int8_t)));
const __m128i vxk6x01234567 = _mm_cvtepi8_epi16(vk6x01234567);
i6 += 8;
vprod01234567 = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
w = (const void*) ((uintptr_t) w + 56 * sizeof(int8_t));
__m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
vscaled4567 = _mm_mul_ps(vscaled4567, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vscaled0123);
vacc4567 = _mm_cvtps_epi32(vscaled4567);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse4.output_min);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
}
if XNN_UNLIKELY(c != 0) {
{
__m128i vacc0123 = _mm_loadu_si128((const __m128i*) b);
__m128i vacc4567 = _mm_loadu_si128((const __m128i*) (b + 4));
b += 8;
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 0 * sizeof(int8_t)));
const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
__m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int8_t)));
const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi1x01234567, vxk1x01234567));
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int8_t)));
const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int8_t)));
const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi3x01234567, vxk3x01234567));
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int8_t)));
const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(vi5x01234567);
const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 40 * sizeof(int8_t)));
const __m128i vxk5x01234567 = _mm_cvtepi8_epi16(vk5x01234567);
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi5x01234567, vxk5x01234567));
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(vi6x01234567);
const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 48 * sizeof(int8_t)));
const __m128i vxk6x01234567 = _mm_cvtepi8_epi16(vk6x01234567);
vprod01234567 = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
__m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
vscaled4567 = _mm_mul_ps(vscaled4567, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vscaled0123);
vacc4567 = _mm_cvtps_epi32(vscaled4567);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (c & 4) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (c & 2) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (c & 1) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
output += 1;
}
}
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 23,195
| 40.719424
| 131
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-6f6m7l8c8s8r-minmax-rndnu-neon-mla8-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-neon-mul8.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
// QS8 depthwise-convolution multipass micro-kernel (rndnu requantization):
// 6 taps in the first pass, 6 taps per middle pass, up to 7 taps in the last
// pass; 8 channels per vector iteration via NEON vmull_s8/vmlal_s8 (mla8).
//
// Fix: restored the six '&params->rndnu_neon.*' operands below, which had been
// mojibake-corrupted to '&para;ms' ('¶ms') by an HTML-entity round trip and
// did not compile.
//
// channels - number of channels (> 0); tail of < 8 handled after main loop
// output_width - number of output pixels (> 0)
// input - per-pixel array of kernel_size input-row pointers
// weights - packed bias (int32) + kernel (int8) data, consumed in order
// buffer - int32 accumulator scratch, carried between passes
// zero - sentinel row; rows equal to 'zero' skip the input_offset bias
void xnn_qs8_dwconv_minmax_rndnu_ukernel_6f6m7l8c8s8r__neon_mla8_ld64(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);
  assert(kernel_size > 6);
  const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
  const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
  const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
  const int8x8_t voutput_min = vld1_dup_s8(&params->rndnu_neon.output_min);
  const int8x8_t voutput_max = vld1_dup_s8(&params->rndnu_neon.output_max);
  do {
    const void* w = weights;
    // First pass to process 6 inputs: accumulators start from the packed bias.
    {
      int32_t* b = buffer;
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      const int8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      }
      input += 6;
      size_t c = round_up_po2(channels, 8);
      for (; c >= 8; c -= 8) {
        int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
        int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
        // Pairs of taps share one 16-bit product register (mull then mlal)
        // before widening into the 32-bit accumulators.
        const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
        const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
        const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
        const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
        const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
        const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
        const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
        const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
        const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
        const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmlal_s8(vprod01234567, vi5x01234567, vk5x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        vst1q_s32(b, vacc0123); b += 4;
        vst1q_s32(b, vacc4567); b += 4;
      }
      assert(c == 0);
    }
    // Middle pass to process 6 inputs in each iteration; accumulators are
    // read from and written back to the scratch buffer.
    for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
      int32_t* b = buffer;
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      const int8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      }
      input += 6;
      size_t c = round_up_po2(channels, 8);
      for (; c >= 8; c -= 8) {
        int32x4_t vacc0123 = vld1q_s32(b);
        int32x4_t vacc4567 = vld1q_s32(b + 4);
        const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
        const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
        const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
        const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
        const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
        const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
        const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
        const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
        const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
        const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmlal_s8(vprod01234567, vi5x01234567, vk5x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        vst1q_s32(b, vacc0123); b += 4;
        vst1q_s32(b, vacc4567); b += 4;
      }
      assert(c == 0);
    }
    // Last pass to process up to 7 inputs, requantize, and store the output.
    {
      const int32_t* b = buffer;
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      const int8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      }
      const int8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
      }
      size_t c = channels;
      for (; c >= 8; c -= 8) {
        int32x4_t vacc0123 = vld1q_s32(b); b += 4;
        int32x4_t vacc4567 = vld1q_s32(b); b += 4;
        const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
        const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
        const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
        const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
        const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
        const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
        const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
        const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
        const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
        const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmlal_s8(vprod01234567, vi5x01234567, vk5x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
        const int8x8_t vk6x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi6x01234567, vk6x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        // rndnu requantization: saturating pre-shift, doubling high-half
        // multiply, then rounding post-shift.
        vacc0123 = vqshlq_s32(vacc0123, vright_pre_shift);
        vacc4567 = vqshlq_s32(vacc4567, vright_pre_shift);
        vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
        vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
        vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
        vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);
#if XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
        vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
        int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#else  // !XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
        vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
        int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#endif  // !XNN_ARCH_ARM64
        vout01234567 = vmax_s8(vout01234567, voutput_min);
        vout01234567 = vmin_s8(vout01234567, voutput_max);
        vst1_s8(output, vout01234567); output += 8;
      }
      if XNN_UNLIKELY(c != 0) {
        {
          int32x4_t vacc0123 = vld1q_s32(b); b += 4;
          int32x4_t vacc4567 = vld1q_s32(b); b += 4;
          const int8x8_t vi0x01234567 = vld1_s8(i0);
          const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
          int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
          const int8x8_t vi1x01234567 = vld1_s8(i1);
          const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
          vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
          vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
          vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
          const int8x8_t vi2x01234567 = vld1_s8(i2);
          const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
          vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
          const int8x8_t vi3x01234567 = vld1_s8(i3);
          const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
          vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
          vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
          vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
          const int8x8_t vi4x01234567 = vld1_s8(i4);
          const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
          vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
          const int8x8_t vi5x01234567 = vld1_s8(i5);
          const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
          vprod01234567 = vmlal_s8(vprod01234567, vi5x01234567, vk5x01234567);
          vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
          vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
          const int8x8_t vi6x01234567 = vld1_s8(i6);
          const int8x8_t vk6x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
          vprod01234567 = vmull_s8(vi6x01234567, vk6x01234567);
          vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
          vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
          vacc0123 = vqshlq_s32(vacc0123, vright_pre_shift);
          vacc4567 = vqshlq_s32(vacc4567, vright_pre_shift);
          vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
          vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
          vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
          vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);
#if XNN_ARCH_ARM64
          int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
          int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
          vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
          int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
          vout01234567 = vmax_s8(vout01234567, voutput_min);
          vout01234567 = vmin_s8(vout01234567, voutput_max);
          // Store the 1..7 remaining channels in 4/2/1-byte chunks.
          if (c & 4) {
            vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
            vout01234567 = vext_s8(vout01234567, vout01234567, 4);
          }
          if (c & 2) {
            vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
            vout01234567 = vext_s8(vout01234567, vout01234567, 2);
          }
          if (c & 1) {
            vst1_lane_s8(output, vout01234567, 0); output += 1;
          }
        }
      }
    }
    input = (const int8_t**) ((uintptr_t) input + input_stride);
    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 15,940
| 35.147392
| 94
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-6f6m7l8c8s8r-minmax-rndnu-neon-mul16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-neon-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
// QS8 depthwise-convolution multipass micro-kernel (rndnu requantization):
// 6 taps in the first pass, 6 taps per middle pass, up to 7 taps in the last
// pass; 8 channels per vector iteration. mul16 variant: inputs and kernel
// taps are widened to int16 (vmovl_s8) and accumulated with vmlal_s16.
//
// Fix: restored the six '&params->rndnu_neon.*' operands below, which had been
// mojibake-corrupted to '&para;ms' ('¶ms') by an HTML-entity round trip and
// did not compile.
//
// channels - number of channels (> 0); tail of < 8 handled after main loop
// output_width - number of output pixels (> 0)
// input - per-pixel array of kernel_size input-row pointers
// weights - packed bias (int32) + kernel (int8) data, consumed in order
// buffer - int32 accumulator scratch, carried between passes
// zero - sentinel row; rows equal to 'zero' skip the input_offset bias
void xnn_qs8_dwconv_minmax_rndnu_ukernel_6f6m7l8c8s8r__neon_mul16(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);
  assert(kernel_size > 6);
  const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
  const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
  const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
  const int8x8_t voutput_min = vld1_dup_s8(&params->rndnu_neon.output_min);
  const int8x8_t voutput_max = vld1_dup_s8(&params->rndnu_neon.output_max);
  do {
    const void* w = weights;
    // First pass to process 6 inputs: accumulators start from the packed bias.
    {
      int32_t* b = buffer;
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      const int8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      }
      input += 6;
      size_t c = round_up_po2(channels, 8);
      for (; c >= 8; c -= 8) {
        int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
        int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
        const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
        const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
        const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
        const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
        const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
        const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
        const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
        const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
        const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
        const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
        const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5)); i5 += 8;
        const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
        vst1q_s32(b, vacc0123); b += 4;
        vst1q_s32(b, vacc4567); b += 4;
      }
      assert(c == 0);
    }
    // Middle pass to process 6 inputs in each iteration; accumulators are
    // read from and written back to the scratch buffer.
    for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
      int32_t* b = buffer;
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      const int8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      }
      input += 6;
      size_t c = round_up_po2(channels, 8);
      for (; c >= 8; c -= 8) {
        int32x4_t vacc0123 = vld1q_s32(b);
        int32x4_t vacc4567 = vld1q_s32(b + 4);
        const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
        const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
        const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
        const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
        const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
        const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
        const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
        const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
        const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
        const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
        const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5)); i5 += 8;
        const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
        vst1q_s32(b, vacc0123); b += 4;
        vst1q_s32(b, vacc4567); b += 4;
      }
      assert(c == 0);
    }
    // Last pass to process up to 7 inputs, requantize, and store the output.
    {
      const int32_t* b = buffer;
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      const int8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      }
      const int8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
      }
      size_t c = channels;
      for (; c >= 8; c -= 8) {
        int32x4_t vacc0123 = vld1q_s32(b); b += 4;
        int32x4_t vacc4567 = vld1q_s32(b); b += 4;
        const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
        const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
        const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
        const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
        const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
        const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
        const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
        const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
        const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
        const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
        const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5)); i5 += 8;
        const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
        const int16x8_t vi6x01234567 = vmovl_s8(vld1_s8(i6)); i6 += 8;
        const int16x8_t vk6x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
        // rndnu requantization: saturating pre-shift, doubling high-half
        // multiply, then rounding post-shift.
        vacc0123 = vqshlq_s32(vacc0123, vright_pre_shift);
        vacc4567 = vqshlq_s32(vacc4567, vright_pre_shift);
        vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
        vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
        vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
        vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);
#if XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
        vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
        int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#else  // !XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
        vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
        int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#endif  // !XNN_ARCH_ARM64
        vout01234567 = vmax_s8(vout01234567, voutput_min);
        vout01234567 = vmin_s8(vout01234567, voutput_max);
        vst1_s8(output, vout01234567); output += 8;
      }
      if XNN_UNLIKELY(c != 0) {
        {
          int32x4_t vacc0123 = vld1q_s32(b); b += 4;
          int32x4_t vacc4567 = vld1q_s32(b); b += 4;
          const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0));
          const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
          const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1));
          const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
          const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2));
          const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
          const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3));
          const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
          const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4));
          const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
          const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5));
          const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
          const int16x8_t vi6x01234567 = vmovl_s8(vld1_s8(i6));
          const int16x8_t vk6x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
          vacc0123 = vqshlq_s32(vacc0123, vright_pre_shift);
          vacc4567 = vqshlq_s32(vacc4567, vright_pre_shift);
          vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
          vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
          vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
          vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);
#if XNN_ARCH_ARM64
          int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
          int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
          vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
          int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
          vout01234567 = vmax_s8(vout01234567, voutput_min);
          vout01234567 = vmin_s8(vout01234567, voutput_max);
          // Store the 1..7 remaining channels in 4/2/1-byte chunks.
          if (c & 4) {
            vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
            vout01234567 = vext_s8(vout01234567, vout01234567, 4);
          }
          if (c & 2) {
            vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
            vout01234567 = vext_s8(vout01234567, vout01234567, 2);
          }
          if (c & 1) {
            vst1_lane_s8(output, vout01234567, 0); output += 1;
          }
        }
      }
    }
    input = (const int8_t**) ((uintptr_t) input + input_stride);
    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 17,759
| 41.386635
| 99
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-6f6m7l8c8s8r-minmax-rndnu-neon-mul8-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-neon-mul8.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
// QS8 depthwise-convolution microkernel (NEON, 8-bit multiplies, 64-bit loads)
// with "rndnu" requantization. Multipass tiling: a first pass consumes 6 kernel
// taps, each middle pass consumes 6 more taps, and the last pass consumes up to
// 7 taps; 8 channels are processed per inner-loop iteration. Partial int32
// accumulators are carried between passes in the caller-provided `buffer`.
// The weights blob is laid out pass-by-pass (bias first, then per-tap weight
// rows), so `w` must advance in exactly the order coded below.
void xnn_qs8_dwconv_minmax_rndnu_ukernel_6f6m7l8c8s8r__neon_mul8_ld64(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);
  // This multipass kernel only handles kernels larger than the first pass;
  // kernel_size <= 6 is served by single-pass variants.
  assert(kernel_size > 6);

  // Broadcast requantization parameters once per call.
  const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
  const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
  const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
  const int8x8_t voutput_min = vld1_dup_s8(&params->rndnu_neon.output_min);
  const int8x8_t voutput_max = vld1_dup_s8(&params->rndnu_neon.output_max);
  do {
    const void* w = weights;

    // First pass to process 6 inputs: initialize accumulators from the bias
    // stored at the head of the weights blob, then spill them to `buffer`.
    {
      int32_t* b = buffer;
      // Rows equal to `zero` are the shared zero page and must NOT be offset.
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      const int8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      }
      input += 6;

      // Channels are rounded up to the 8-wide tile; weights are padded to match.
      size_t c = round_up_po2(channels, 8);

      for (; c >= 8; c -= 8) {
        // Seed accumulators with per-channel bias.
        int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
        int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;

        // Tap 0: widening 8x8->16 multiply, then accumulate into int32 lanes.
        const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
        const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
        const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi1x01234567, vk1x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
        const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
        const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi3x01234567, vk3x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
        const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
        const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi5x01234567, vk5x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));

        // Spill partial sums for the next pass.
        vst1q_s32(b, vacc0123); b += 4;
        vst1q_s32(b, vacc4567); b += 4;
      }

      assert(c == 0);
    }

    // Middle pass to process 6 inputs in each iteration. Stops while at least
    // 7 taps remain (ks > 7), leaving up to 7 taps for the last pass.
    for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
      int32_t* b = buffer;
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      const int8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      }
      input += 6;

      size_t c = round_up_po2(channels, 8);

      for (; c >= 8; c -= 8) {
        // Resume from the partial sums spilled by the previous pass (no bias
        // here: middle-pass weight rows carry only 8 int8 weights per tap).
        int32x4_t vacc0123 = vld1q_s32(b);
        int32x4_t vacc4567 = vld1q_s32(b + 4);

        const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
        const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
        const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi1x01234567, vk1x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
        const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
        const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi3x01234567, vk3x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
        const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
        const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi5x01234567, vk5x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));

        vst1q_s32(b, vacc0123); b += 4;
        vst1q_s32(b, vacc4567); b += 4;
      }

      assert(c == 0);
    }

    // Last pass to process up to 7 inputs: finish accumulation, requantize,
    // clamp, and store the int8 outputs.
    {
      const int32_t* b = buffer;
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      const int8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      }
      const int8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
      }

      size_t c = channels;
      for (; c >= 8; c -= 8) {
        int32x4_t vacc0123 = vld1q_s32(b); b += 4;
        int32x4_t vacc4567 = vld1q_s32(b); b += 4;

        const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
        const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
        const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi1x01234567, vk1x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
        const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
        const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi3x01234567, vk3x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
        const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
        const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi5x01234567, vk5x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
        const int8x8_t vk6x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi6x01234567, vk6x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));

        // rndnu requantization: saturating pre-shift, doubling high multiply,
        // then rounding post-shift back to the output scale.
        vacc0123 = vqshlq_s32(vacc0123, vright_pre_shift);
        vacc4567 = vqshlq_s32(vacc4567, vright_pre_shift);

        vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
        vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);

        vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
        vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);

#if XNN_ARCH_ARM64
        // AArch64 has a fused narrow-into-high-half instruction.
        int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);

        vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);

        int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#else  // !XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));

        vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);

        int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#endif  // !XNN_ARCH_ARM64

        vout01234567 = vmax_s8(vout01234567, voutput_min);

        vout01234567 = vmin_s8(vout01234567, voutput_max);

        vst1_s8(output, vout01234567); output += 8;
      }
      if XNN_UNLIKELY(c != 0) {
        // Remainder tile (1-7 channels): compute a full 8-lane result (weights
        // are padded; XNN_OOB_READS permits the over-read) and store piecewise.
        {
          int32x4_t vacc0123 = vld1q_s32(b); b += 4;
          int32x4_t vacc4567 = vld1q_s32(b); b += 4;

          const int8x8_t vi0x01234567 = vld1_s8(i0);
          const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
          int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
          vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
          vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
          const int8x8_t vi1x01234567 = vld1_s8(i1);
          const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
          vprod01234567 = vmull_s8(vi1x01234567, vk1x01234567);
          vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
          vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
          const int8x8_t vi2x01234567 = vld1_s8(i2);
          const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
          vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
          vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
          vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
          const int8x8_t vi3x01234567 = vld1_s8(i3);
          const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
          vprod01234567 = vmull_s8(vi3x01234567, vk3x01234567);
          vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
          vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
          const int8x8_t vi4x01234567 = vld1_s8(i4);
          const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
          vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
          vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
          vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
          const int8x8_t vi5x01234567 = vld1_s8(i5);
          const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
          vprod01234567 = vmull_s8(vi5x01234567, vk5x01234567);
          vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
          vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
          const int8x8_t vi6x01234567 = vld1_s8(i6);
          const int8x8_t vk6x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
          vprod01234567 = vmull_s8(vi6x01234567, vk6x01234567);
          vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
          vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));

          vacc0123 = vqshlq_s32(vacc0123, vright_pre_shift);
          vacc4567 = vqshlq_s32(vacc4567, vright_pre_shift);

          vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
          vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);

          vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
          vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);

#if XNN_ARCH_ARM64
          int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
          int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
          vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);

          int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
          vout01234567 = vmax_s8(vout01234567, voutput_min);
          vout01234567 = vmin_s8(vout01234567, voutput_max);

          // Store 4/2/1 lanes and rotate the vector so the next store reads lane 0.
          if (c & 4) {
            vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
            vout01234567 = vext_s8(vout01234567, vout01234567, 4);
          }
          if (c & 2) {
            vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
            vout01234567 = vext_s8(vout01234567, vout01234567, 2);
          }
          if (c & 1) {
            vst1_lane_s8(output, vout01234567, 0); output += 1;
          }
        }
      }
    }

    input = (const int8_t**) ((uintptr_t) input + input_stride);
    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 17,440
| 36.507527
| 94
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-8f8m9l1c1s1r-minmax-fp32-scalar-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QS8 depthwise-convolution microkernel (portable scalar, fp32 requantization
// via the float "magic bias" trick). Multipass tiling: first pass consumes 8
// kernel taps, each middle pass 8 more, and the last pass up to 9; one channel
// is processed per inner-loop iteration. Partial int32 sums are carried
// between passes in the caller-provided `buffer`. The weights blob is laid out
// pass-by-pass (int32 bias + 8 int8 weights per channel in the first pass,
// weights only afterwards), so `w` must advance exactly as coded.
void xnn_qs8_dwconv_minmax_fp32_ukernel_8f8m9l1c1s1r__scalar_fmagic(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);
  // kernel_size <= 8 is handled by single-pass variants.
  assert(kernel_size > 8);

  const float vscale = params->fp32_scalar_fmagic.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
  const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
  const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
  do {
    const void* w = weights;

    // First pass to process 8 inputs: seed each channel's accumulator with the
    // bias from the weights blob, then spill it to `buffer`.
    {
      int32_t* b = buffer;
      // Rows equal to `zero` are the shared zero page and must NOT be offset.
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      const int8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      }
      const int8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
      }
      const int8_t* i7 = input[7];
      assert(i7 != NULL);
      if XNN_UNPREDICTABLE(i7 != zero) {
        i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
      }
      input += 8;

      size_t c = channels;
      do {
        // Per-channel layout here: int32 bias followed by 8 int8 tap weights.
        int32_t vacc = unaligned_load_s32(w);

        const int32_t vi0 = (int32_t) *i0++;
        const int32_t vk0 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[0];
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) *i1++;
        const int32_t vk1 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[1];
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) *i2++;
        const int32_t vk2 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[2];
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) *i3++;
        const int32_t vk3 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[3];
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) *i4++;
        const int32_t vk4 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[4];
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) *i5++;
        const int32_t vk5 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[5];
        vacc += vi5 * vk5;
        const int32_t vi6 = (int32_t) *i6++;
        const int32_t vk6 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[6];
        vacc += vi6 * vk6;
        const int32_t vi7 = (int32_t) *i7++;
        const int32_t vk7 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[7];
        vacc += vi7 * vk7;

        w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 8 * sizeof(int8_t));

        *b++ = vacc;
      } while (--c != 0);
    }

    // Middle pass to process 8 inputs in each iteration. Stops while at least
    // 9 taps remain (ks > 9), leaving up to 9 taps for the last pass.
    for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
      int32_t* b = buffer;
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      const int8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      }
      const int8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
      }
      const int8_t* i7 = input[7];
      assert(i7 != NULL);
      if XNN_UNPREDICTABLE(i7 != zero) {
        i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
      }
      input += 8;

      size_t c = channels;
      do {
        // Resume from the partial sum spilled by the previous pass
        // (middle-pass weight rows carry 8 int8 weights, no bias).
        int32_t vacc = *b;

        const int32_t vi0 = (int32_t) *i0++;
        const int32_t vk0 = ((const int8_t*) w)[0];
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) *i1++;
        const int32_t vk1 = ((const int8_t*) w)[1];
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) *i2++;
        const int32_t vk2 = ((const int8_t*) w)[2];
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) *i3++;
        const int32_t vk3 = ((const int8_t*) w)[3];
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) *i4++;
        const int32_t vk4 = ((const int8_t*) w)[4];
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) *i5++;
        const int32_t vk5 = ((const int8_t*) w)[5];
        vacc += vi5 * vk5;
        const int32_t vi6 = (int32_t) *i6++;
        const int32_t vk6 = ((const int8_t*) w)[6];
        vacc += vi6 * vk6;
        const int32_t vi7 = (int32_t) *i7++;
        const int32_t vk7 = ((const int8_t*) w)[7];
        vacc += vi7 * vk7;

        w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

        *b++ = vacc;
      } while (--c != 0);
    }

    // Last pass to process up to 9 inputs: finish accumulation, requantize
    // with the float magic-bias trick, and store the int8 outputs.
    {
      const int32_t* b = buffer;
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      const int8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      }
      const int8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
      }
      const int8_t* i7 = input[7];
      assert(i7 != NULL);
      if XNN_UNPREDICTABLE(i7 != zero) {
        i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
      }
      const int8_t* i8 = input[8];
      assert(i8 != NULL);
      if XNN_UNPREDICTABLE(i8 != zero) {
        i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
      }

      size_t c = channels;
      do {
        int32_t vacc = unaligned_load_s32(b++);

        const int32_t vi0 = (int32_t) *i0++;
        const int32_t vk0 = ((const int8_t*) w)[0];
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) *i1++;
        const int32_t vk1 = ((const int8_t*) w)[1];
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) *i2++;
        const int32_t vk2 = ((const int8_t*) w)[2];
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) *i3++;
        const int32_t vk3 = ((const int8_t*) w)[3];
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) *i4++;
        const int32_t vk4 = ((const int8_t*) w)[4];
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) *i5++;
        const int32_t vk5 = ((const int8_t*) w)[5];
        vacc += vi5 * vk5;
        const int32_t vi6 = (int32_t) *i6++;
        const int32_t vk6 = ((const int8_t*) w)[6];
        vacc += vi6 * vk6;
        const int32_t vi7 = (int32_t) *i7++;
        const int32_t vk7 = ((const int8_t*) w)[7];
        vacc += vi7 * vk7;
        const int32_t vi8 = (int32_t) *i8++;
        const int32_t vk8 = ((const int8_t*) w)[8];
        vacc += vi8 * vk8;

        w = (const void*) ((uintptr_t) w + 9 * sizeof(int8_t));

        // fmagic requantization: scale in float, clamp in float, then add the
        // magic bias so the integer result lands in the float's low mantissa
        // bits; subtracting the combined constant yields the zero-point-
        // adjusted int8 value without an explicit float->int conversion.
        float vfpacc = (float) vacc * vscale;

        vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
        vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
        vfpacc += vmagic_bias;
        int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;

        *output++ = (int8_t) vout;
      } while (--c != 0);
    }

    input = (const int8_t**) ((uintptr_t) input + input_stride);
    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 10,459
| 34.337838
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-8f8m9l1c1s1r-minmax-fp32-scalar-imagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QS8 depthwise-convolution microkernel (portable scalar, fp32 requantization,
// "imagic" variant: magic-bias float->int conversion with the min/max clamp
// applied on the biased INTEGER representation). Multipass tiling: first pass
// consumes 8 kernel taps, each middle pass 8 more, the last pass up to 9; one
// channel per inner-loop iteration, with int32 partial sums carried between
// passes in the caller-provided `buffer`. The weights blob is laid out
// pass-by-pass, so `w` must advance exactly as coded.
void xnn_qs8_dwconv_minmax_fp32_ukernel_8f8m9l1c1s1r__scalar_imagic(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);
  // kernel_size <= 8 is handled by single-pass variants.
  assert(kernel_size > 8);

  const float vscale = params->fp32_scalar_imagic.scale;
  const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
  const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
  const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
  const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
  do {
    const void* w = weights;

    // First pass to process 8 inputs: seed each channel's accumulator with the
    // bias from the weights blob, then spill it to `buffer`.
    {
      int32_t* b = buffer;
      // Rows equal to `zero` are the shared zero page and must NOT be offset.
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      const int8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      }
      const int8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
      }
      const int8_t* i7 = input[7];
      assert(i7 != NULL);
      if XNN_UNPREDICTABLE(i7 != zero) {
        i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
      }
      input += 8;

      size_t c = channels;
      do {
        // Per-channel layout here: int32 bias followed by 8 int8 tap weights.
        int32_t vacc = unaligned_load_s32(w);

        const int32_t vi0 = (int32_t) *i0++;
        const int32_t vk0 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[0];
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) *i1++;
        const int32_t vk1 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[1];
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) *i2++;
        const int32_t vk2 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[2];
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) *i3++;
        const int32_t vk3 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[3];
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) *i4++;
        const int32_t vk4 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[4];
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) *i5++;
        const int32_t vk5 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[5];
        vacc += vi5 * vk5;
        const int32_t vi6 = (int32_t) *i6++;
        const int32_t vk6 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[6];
        vacc += vi6 * vk6;
        const int32_t vi7 = (int32_t) *i7++;
        const int32_t vk7 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[7];
        vacc += vi7 * vk7;

        w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 8 * sizeof(int8_t));

        *b++ = vacc;
      } while (--c != 0);
    }

    // Middle pass to process 8 inputs in each iteration. Stops while at least
    // 9 taps remain (ks > 9), leaving up to 9 taps for the last pass.
    for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
      int32_t* b = buffer;
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      const int8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      }
      const int8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
      }
      const int8_t* i7 = input[7];
      assert(i7 != NULL);
      if XNN_UNPREDICTABLE(i7 != zero) {
        i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
      }
      input += 8;

      size_t c = channels;
      do {
        // Resume from the partial sum spilled by the previous pass
        // (middle-pass weight rows carry 8 int8 weights, no bias).
        int32_t vacc = *b;

        const int32_t vi0 = (int32_t) *i0++;
        const int32_t vk0 = ((const int8_t*) w)[0];
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) *i1++;
        const int32_t vk1 = ((const int8_t*) w)[1];
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) *i2++;
        const int32_t vk2 = ((const int8_t*) w)[2];
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) *i3++;
        const int32_t vk3 = ((const int8_t*) w)[3];
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) *i4++;
        const int32_t vk4 = ((const int8_t*) w)[4];
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) *i5++;
        const int32_t vk5 = ((const int8_t*) w)[5];
        vacc += vi5 * vk5;
        const int32_t vi6 = (int32_t) *i6++;
        const int32_t vk6 = ((const int8_t*) w)[6];
        vacc += vi6 * vk6;
        const int32_t vi7 = (int32_t) *i7++;
        const int32_t vk7 = ((const int8_t*) w)[7];
        vacc += vi7 * vk7;

        w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

        *b++ = vacc;
      } while (--c != 0);
    }

    // Last pass to process up to 9 inputs: finish accumulation, requantize
    // with the integer magic-bias trick, and store the int8 outputs.
    {
      const int32_t* b = buffer;
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      const int8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      }
      const int8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
      }
      const int8_t* i7 = input[7];
      assert(i7 != NULL);
      if XNN_UNPREDICTABLE(i7 != zero) {
        i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
      }
      const int8_t* i8 = input[8];
      assert(i8 != NULL);
      if XNN_UNPREDICTABLE(i8 != zero) {
        i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
      }

      size_t c = channels;
      do {
        int32_t vacc = unaligned_load_s32(b++);

        const int32_t vi0 = (int32_t) *i0++;
        const int32_t vk0 = ((const int8_t*) w)[0];
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) *i1++;
        const int32_t vk1 = ((const int8_t*) w)[1];
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) *i2++;
        const int32_t vk2 = ((const int8_t*) w)[2];
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) *i3++;
        const int32_t vk3 = ((const int8_t*) w)[3];
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) *i4++;
        const int32_t vk4 = ((const int8_t*) w)[4];
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) *i5++;
        const int32_t vk5 = ((const int8_t*) w)[5];
        vacc += vi5 * vk5;
        const int32_t vi6 = (int32_t) *i6++;
        const int32_t vk6 = ((const int8_t*) w)[6];
        vacc += vi6 * vk6;
        const int32_t vi7 = (int32_t) *i7++;
        const int32_t vk7 = ((const int8_t*) w)[7];
        vacc += vi7 * vk7;
        const int32_t vi8 = (int32_t) *i8++;
        const int32_t vk8 = ((const int8_t*) w)[8];
        vacc += vi8 * vk8;

        w = (const void*) ((uintptr_t) w + 9 * sizeof(int8_t));

        // imagic requantization: scale in float, add the magic bias so the
        // integer result lands in the float's low mantissa bits, clamp the
        // resulting bit pattern against the precomputed biased min/max, then
        // subtract the combined bias/zero-point constant.
        float vfpacc = (float) vacc * vscale;

        vfpacc += vmagic_bias;
        int32_t vout = (int32_t) float_as_uint32(vfpacc);
        vout = math_max_s32(vout, vmagic_min);
        vout = math_min_s32(vout, vmagic_max);
        vout -= vmagic_bias_less_zero_point;

        *output++ = (int8_t) vout;
      } while (--c != 0);
    }

    input = (const int8_t**) ((uintptr_t) input + input_stride);
    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 10,347
| 33.841751
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-8f8m9l1c1s1r-minmax-fp32-scalar-lrintf.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// Multipass QS8 depthwise-convolution microkernel, scalar variant with
// lrintf-based rounding.
//
// Pass structure (matching the "8f8m9l1c" name and the code below): the first
// pass consumes 8 kernel taps per channel, each middle pass consumes another
// 8 taps, and the last pass consumes up to 9 taps and produces the quantized
// output.  One channel is processed per inner-loop iteration.
//
// `buffer` holds one int32 partial accumulator per channel and carries sums
// between passes.  Input rows equal to `zero` are shared zero padding and are
// deliberately NOT shifted by `input_offset`.
void xnn_qs8_dwconv_minmax_fp32_ukernel_8f8m9l1c1s1r__scalar_lrintf(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);
  assert(kernel_size > 8);  // the first pass takes 8 taps; at least 1 must remain for the last pass
  // Requantization parameters: float scale plus clamp bounds expressed
  // relative to the output zero point (zero point is added back after rounding).
  const float vscale = params->fp32_scalar_lrintf.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
  const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
  do {
    const void* w = weights;
    // First pass to process 8 inputs.
    {
      int32_t* b = buffer;
      // Set up the 8 input-row pointers; rows equal to `zero` are padding and
      // are not offset.
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      const int8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      }
      const int8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
      }
      const int8_t* i7 = input[7];
      assert(i7 != NULL);
      if XNN_UNPREDICTABLE(i7 != zero) {
        i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
      }
      input += 8;
      size_t c = channels;
      do {
        // First-pass weight layout per channel: int32 bias followed by
        // 8 int8 taps; the bias seeds the accumulator.
        int32_t vacc = unaligned_load_s32(w);
        const int32_t vi0 = (int32_t) *i0++;
        const int32_t vk0 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[0];
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) *i1++;
        const int32_t vk1 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[1];
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) *i2++;
        const int32_t vk2 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[2];
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) *i3++;
        const int32_t vk3 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[3];
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) *i4++;
        const int32_t vk4 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[4];
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) *i5++;
        const int32_t vk5 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[5];
        vacc += vi5 * vk5;
        const int32_t vi6 = (int32_t) *i6++;
        const int32_t vk6 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[6];
        vacc += vi6 * vk6;
        const int32_t vi7 = (int32_t) *i7++;
        const int32_t vk7 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[7];
        vacc += vi7 * vk7;
        w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 8 * sizeof(int8_t));
        *b++ = vacc;  // stash the partial sum for the next pass
      } while (--c != 0);
    }
    // Middle pass to process 8 inputs in each iteration.
    // Loop while more than 9 taps remain, so the final (up to) 9 taps are
    // left for the last pass.
    for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
      int32_t* b = buffer;
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      const int8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      }
      const int8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
      }
      const int8_t* i7 = input[7];
      assert(i7 != NULL);
      if XNN_UNPREDICTABLE(i7 != zero) {
        i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
      }
      input += 8;
      size_t c = channels;
      do {
        // Resume from the partial sum; middle-pass weights are plain int8
        // taps (no interleaved bias).
        int32_t vacc = *b;
        const int32_t vi0 = (int32_t) *i0++;
        const int32_t vk0 = ((const int8_t*) w)[0];
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) *i1++;
        const int32_t vk1 = ((const int8_t*) w)[1];
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) *i2++;
        const int32_t vk2 = ((const int8_t*) w)[2];
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) *i3++;
        const int32_t vk3 = ((const int8_t*) w)[3];
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) *i4++;
        const int32_t vk4 = ((const int8_t*) w)[4];
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) *i5++;
        const int32_t vk5 = ((const int8_t*) w)[5];
        vacc += vi5 * vk5;
        const int32_t vi6 = (int32_t) *i6++;
        const int32_t vk6 = ((const int8_t*) w)[6];
        vacc += vi6 * vk6;
        const int32_t vi7 = (int32_t) *i7++;
        const int32_t vk7 = ((const int8_t*) w)[7];
        vacc += vi7 * vk7;
        w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        *b++ = vacc;
      } while (--c != 0);
    }
    // Last pass to process up to 9 inputs.
    {
      const int32_t* b = buffer;
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      const int8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      }
      const int8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
      }
      const int8_t* i7 = input[7];
      assert(i7 != NULL);
      if XNN_UNPREDICTABLE(i7 != zero) {
        i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
      }
      const int8_t* i8 = input[8];
      assert(i8 != NULL);
      if XNN_UNPREDICTABLE(i8 != zero) {
        i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
      }
      size_t c = channels;
      do {
        int32_t vacc = unaligned_load_s32(b++);
        const int32_t vi0 = (int32_t) *i0++;
        const int32_t vk0 = ((const int8_t*) w)[0];
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) *i1++;
        const int32_t vk1 = ((const int8_t*) w)[1];
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) *i2++;
        const int32_t vk2 = ((const int8_t*) w)[2];
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) *i3++;
        const int32_t vk3 = ((const int8_t*) w)[3];
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) *i4++;
        const int32_t vk4 = ((const int8_t*) w)[4];
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) *i5++;
        const int32_t vk5 = ((const int8_t*) w)[5];
        vacc += vi5 * vk5;
        const int32_t vi6 = (int32_t) *i6++;
        const int32_t vk6 = ((const int8_t*) w)[6];
        vacc += vi6 * vk6;
        const int32_t vi7 = (int32_t) *i7++;
        const int32_t vk7 = ((const int8_t*) w)[7];
        vacc += vi7 * vk7;
        const int32_t vi8 = (int32_t) *i8++;
        const int32_t vk8 = ((const int8_t*) w)[8];
        vacc += vi8 * vk8;
        w = (const void*) ((uintptr_t) w + 9 * sizeof(int8_t));
        // Requantize: scale to float, clamp (bounds are pre-shifted by the
        // output zero point), round with lrintf (rounds in the current FP
        // rounding mode, round-to-nearest-even by default), then re-add the
        // zero point.
        float vfpacc = (float) vacc * vscale;
        vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
        vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
        const int32_t vrndacc = (int32_t) lrintf(vfpacc);
        int32_t vout = vrndacc + voutput_zero_point;
        *output++ = (int8_t) vout;
      } while (--c != 0);
    }
    // Advance to the next output pixel: step the indirection buffer and
    // move the output pointer by the caller-provided increment.
    input = (const int8_t**) ((uintptr_t) input + input_stride);
    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 10,363
| 34.013514
| 98
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-8f8m9l1c1s1r-minmax-fp32-wasm-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// Multipass QS8 depthwise-convolution microkernel, WAsm variant using the
// "fmagic" (magic-bias) float-to-int conversion.
//
// Pass structure (matching the "8f8m9l1c" name and the code below): the first
// pass consumes 8 kernel taps per channel, each middle pass consumes another
// 8 taps, and the last pass consumes up to 9 taps and produces the quantized
// output.  One channel is processed per inner-loop iteration.
//
// `buffer` holds one int32 partial accumulator per channel and carries sums
// between passes.  Input rows equal to `zero` are shared zero padding and are
// deliberately NOT shifted by `input_offset`.
void xnn_qs8_dwconv_minmax_fp32_ukernel_8f8m9l1c1s1r__wasm_fmagic(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);
  assert(kernel_size > 8);  // the first pass takes 8 taps; at least 1 must remain for the last pass
  // Requantization parameters: float scale, clamp bounds pre-shifted by the
  // output zero point, and the magic-bias constants for the float->int trick.
  const float vscale = params->fp32_scalar_fmagic.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
  const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
  const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
  do {
    const void* w = weights;
    // First pass to process 8 inputs.
    {
      int32_t* b = buffer;
      // Set up the 8 input-row pointers; rows equal to `zero` are padding and
      // are not offset.
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      const int8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      }
      const int8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
      }
      const int8_t* i7 = input[7];
      assert(i7 != NULL);
      if XNN_UNPREDICTABLE(i7 != zero) {
        i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
      }
      input += 8;
      size_t c = channels;
      do {
        // First-pass weight layout per channel: int32 bias followed by
        // 8 int8 taps; the bias seeds the accumulator.
        int32_t vacc = unaligned_load_s32(w);
        const int32_t vi0 = (int32_t) *i0++;
        const int32_t vk0 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[0];
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) *i1++;
        const int32_t vk1 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[1];
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) *i2++;
        const int32_t vk2 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[2];
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) *i3++;
        const int32_t vk3 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[3];
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) *i4++;
        const int32_t vk4 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[4];
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) *i5++;
        const int32_t vk5 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[5];
        vacc += vi5 * vk5;
        const int32_t vi6 = (int32_t) *i6++;
        const int32_t vk6 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[6];
        vacc += vi6 * vk6;
        const int32_t vi7 = (int32_t) *i7++;
        const int32_t vk7 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[7];
        vacc += vi7 * vk7;
        w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 8 * sizeof(int8_t));
        *b++ = vacc;  // stash the partial sum for the next pass
      } while (--c != 0);
    }
    // Middle pass to process 8 inputs in each iteration.
    // Loop while more than 9 taps remain, so the final (up to) 9 taps are
    // left for the last pass.
    for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
      int32_t* b = buffer;
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      const int8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      }
      const int8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
      }
      const int8_t* i7 = input[7];
      assert(i7 != NULL);
      if XNN_UNPREDICTABLE(i7 != zero) {
        i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
      }
      input += 8;
      size_t c = channels;
      do {
        // Resume from the partial sum; middle-pass weights are plain int8
        // taps (no interleaved bias).
        int32_t vacc = *b;
        const int32_t vi0 = (int32_t) *i0++;
        const int32_t vk0 = ((const int8_t*) w)[0];
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) *i1++;
        const int32_t vk1 = ((const int8_t*) w)[1];
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) *i2++;
        const int32_t vk2 = ((const int8_t*) w)[2];
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) *i3++;
        const int32_t vk3 = ((const int8_t*) w)[3];
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) *i4++;
        const int32_t vk4 = ((const int8_t*) w)[4];
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) *i5++;
        const int32_t vk5 = ((const int8_t*) w)[5];
        vacc += vi5 * vk5;
        const int32_t vi6 = (int32_t) *i6++;
        const int32_t vk6 = ((const int8_t*) w)[6];
        vacc += vi6 * vk6;
        const int32_t vi7 = (int32_t) *i7++;
        const int32_t vk7 = ((const int8_t*) w)[7];
        vacc += vi7 * vk7;
        w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        *b++ = vacc;
      } while (--c != 0);
    }
    // Last pass to process up to 9 inputs.
    {
      const int32_t* b = buffer;
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      const int8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      }
      const int8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
      }
      const int8_t* i7 = input[7];
      assert(i7 != NULL);
      if XNN_UNPREDICTABLE(i7 != zero) {
        i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
      }
      const int8_t* i8 = input[8];
      assert(i8 != NULL);
      if XNN_UNPREDICTABLE(i8 != zero) {
        i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
      }
      size_t c = channels;
      do {
        int32_t vacc = unaligned_load_s32(b++);
        const int32_t vi0 = (int32_t) *i0++;
        const int32_t vk0 = ((const int8_t*) w)[0];
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) *i1++;
        const int32_t vk1 = ((const int8_t*) w)[1];
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) *i2++;
        const int32_t vk2 = ((const int8_t*) w)[2];
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) *i3++;
        const int32_t vk3 = ((const int8_t*) w)[3];
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) *i4++;
        const int32_t vk4 = ((const int8_t*) w)[4];
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) *i5++;
        const int32_t vk5 = ((const int8_t*) w)[5];
        vacc += vi5 * vk5;
        const int32_t vi6 = (int32_t) *i6++;
        const int32_t vk6 = ((const int8_t*) w)[6];
        vacc += vi6 * vk6;
        const int32_t vi7 = (int32_t) *i7++;
        const int32_t vk7 = ((const int8_t*) w)[7];
        vacc += vi7 * vk7;
        const int32_t vi8 = (int32_t) *i8++;
        const int32_t vk8 = ((const int8_t*) w)[8];
        vacc += vi8 * vk8;
        w = (const void*) ((uintptr_t) w + 9 * sizeof(int8_t));
        // Requantize: scale to float, clamp with the WAsm min/max builtins,
        // then convert to int via the magic-bias trick: adding the bias
        // pushes the integer value into the mantissa bits, so reinterpreting
        // the float bits and subtracting (bias - zero_point) yields the
        // rounded, zero-point-adjusted result.
        float vfpacc = (float) vacc * vscale;
        vfpacc = __builtin_wasm_max_f32(vfpacc, voutput_min_less_zero_point);
        vfpacc = __builtin_wasm_min_f32(vfpacc, voutput_max_less_zero_point);
        vfpacc += vmagic_bias;
        int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
        *output++ = (int8_t) vout;
      } while (--c != 0);
    }
    // Advance to the next output pixel: step the indirection buffer and
    // move the output pointer by the caller-provided increment.
    input = (const int8_t**) ((uintptr_t) input + input_stride);
    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 10,477
| 34.398649
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-8f8m9l2c1s1r-minmax-fp32-scalar-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_dwconv_minmax_fp32_ukernel_8f8m9l2c1s1r__scalar_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 8);
const float vscale = params->fp32_scalar_fmagic.scale;
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
do {
const void* w = weights;
// First pass to process 8 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10];
const int32_t vk5x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[11];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
const int32_t vi6x0 = (int32_t) i6[0];
const int32_t vi6x1 = (int32_t) i6[1];
i6 += 2;
const int32_t vk6x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12];
const int32_t vk6x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[13];
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
const int32_t vi7x0 = (int32_t) i7[0];
const int32_t vi7x1 = (int32_t) i7[1];
i7 += 2;
const int32_t vk7x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14];
const int32_t vk7x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[15];
vacc0 += vi7x0 * vk7x0;
vacc1 += vi7x1 * vk7x1;
w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 16 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5;
const int32_t vk5 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6;
const int32_t vk6 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[6];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7;
const int32_t vk7 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[7];
vacc += vi7 * vk7;
w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 8 * sizeof(int8_t));
*b++ = vacc;
}
}
// Middle pass to process 8 inputs in each iteration.
for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) ((const int8_t*) w)[10];
const int32_t vk5x1 = (int32_t) ((const int8_t*) w)[11];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
const int32_t vi6x0 = (int32_t) i6[0];
const int32_t vi6x1 = (int32_t) i6[1];
i6 += 2;
const int32_t vk6x0 = (int32_t) ((const int8_t*) w)[12];
const int32_t vk6x1 = (int32_t) ((const int8_t*) w)[13];
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
const int32_t vi7x0 = (int32_t) i7[0];
const int32_t vi7x1 = (int32_t) i7[1];
i7 += 2;
const int32_t vk7x0 = (int32_t) ((const int8_t*) w)[14];
const int32_t vk7x1 = (int32_t) ((const int8_t*) w)[15];
vacc0 += vi7x0 * vk7x0;
vacc1 += vi7x1 * vk7x1;
w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = b[0];
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5;
const int32_t vk5 = (int32_t) ((const int8_t*) w)[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6;
const int32_t vk6 = (int32_t) ((const int8_t*) w)[6];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7;
const int32_t vk7 = (int32_t) ((const int8_t*) w)[7];
vacc += vi7 * vk7;
w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
*b++ = vacc;
}
}
// Last pass to process up to 9 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
const int8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
}
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
b += 2;
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) ((const int8_t*) w)[10];
const int32_t vk5x1 = (int32_t) ((const int8_t*) w)[11];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
const int32_t vi6x0 = (int32_t) i6[0];
const int32_t vi6x1 = (int32_t) i6[1];
i6 += 2;
const int32_t vk6x0 = (int32_t) ((const int8_t*) w)[12];
const int32_t vk6x1 = (int32_t) ((const int8_t*) w)[13];
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
const int32_t vi7x0 = (int32_t) i7[0];
const int32_t vi7x1 = (int32_t) i7[1];
i7 += 2;
const int32_t vk7x0 = (int32_t) ((const int8_t*) w)[14];
const int32_t vk7x1 = (int32_t) ((const int8_t*) w)[15];
vacc0 += vi7x0 * vk7x0;
vacc1 += vi7x1 * vk7x1;
const int32_t vi8x0 = (int32_t) i8[0];
const int32_t vi8x1 = (int32_t) i8[1];
i8 += 2;
const int32_t vk8x0 = (int32_t) ((const int8_t*) w)[16];
const int32_t vk8x1 = (int32_t) ((const int8_t*) w)[17];
vacc0 += vi8x0 * vk8x0;
vacc1 += vi8x1 * vk8x1;
w = (const void*) ((uintptr_t) w + 18 * sizeof(int8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
vfpacc0 *= vscale;
vfpacc1 *= vscale;
vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = b[0];
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5;
const int32_t vk5 = (int32_t) ((const int8_t*) w)[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6;
const int32_t vk6 = (int32_t) ((const int8_t*) w)[6];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7;
const int32_t vk7 = (int32_t) ((const int8_t*) w)[7];
vacc += vi7 * vk7;
const int32_t vi8 = (int32_t) *i8;
const int32_t vk8 = (int32_t) ((const int8_t*) w)[8];
vacc += vi8 * vk8;
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 20,349
| 33.317032
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-8f8m9l2c1s1r-minmax-fp32-scalar-imagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_dwconv_minmax_fp32_ukernel_8f8m9l2c1s1r__scalar_imagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 8);
const float vscale = params->fp32_scalar_imagic.scale;
const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
do {
const void* w = weights;
// First pass to process 8 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10];
const int32_t vk5x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[11];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
const int32_t vi6x0 = (int32_t) i6[0];
const int32_t vi6x1 = (int32_t) i6[1];
i6 += 2;
const int32_t vk6x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12];
const int32_t vk6x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[13];
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
const int32_t vi7x0 = (int32_t) i7[0];
const int32_t vi7x1 = (int32_t) i7[1];
i7 += 2;
const int32_t vk7x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14];
const int32_t vk7x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[15];
vacc0 += vi7x0 * vk7x0;
vacc1 += vi7x1 * vk7x1;
w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 16 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5;
const int32_t vk5 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6;
const int32_t vk6 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[6];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7;
const int32_t vk7 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[7];
vacc += vi7 * vk7;
w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 8 * sizeof(int8_t));
*b++ = vacc;
}
}
// Middle pass to process 8 inputs in each iteration.
for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) ((const int8_t*) w)[10];
const int32_t vk5x1 = (int32_t) ((const int8_t*) w)[11];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
const int32_t vi6x0 = (int32_t) i6[0];
const int32_t vi6x1 = (int32_t) i6[1];
i6 += 2;
const int32_t vk6x0 = (int32_t) ((const int8_t*) w)[12];
const int32_t vk6x1 = (int32_t) ((const int8_t*) w)[13];
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
const int32_t vi7x0 = (int32_t) i7[0];
const int32_t vi7x1 = (int32_t) i7[1];
i7 += 2;
const int32_t vk7x0 = (int32_t) ((const int8_t*) w)[14];
const int32_t vk7x1 = (int32_t) ((const int8_t*) w)[15];
vacc0 += vi7x0 * vk7x0;
vacc1 += vi7x1 * vk7x1;
w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = b[0];
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5;
const int32_t vk5 = (int32_t) ((const int8_t*) w)[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6;
const int32_t vk6 = (int32_t) ((const int8_t*) w)[6];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7;
const int32_t vk7 = (int32_t) ((const int8_t*) w)[7];
vacc += vi7 * vk7;
w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
*b++ = vacc;
}
}
// Last pass to process up to 9 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
const int8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
}
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
b += 2;
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) ((const int8_t*) w)[10];
const int32_t vk5x1 = (int32_t) ((const int8_t*) w)[11];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
const int32_t vi6x0 = (int32_t) i6[0];
const int32_t vi6x1 = (int32_t) i6[1];
i6 += 2;
const int32_t vk6x0 = (int32_t) ((const int8_t*) w)[12];
const int32_t vk6x1 = (int32_t) ((const int8_t*) w)[13];
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
const int32_t vi7x0 = (int32_t) i7[0];
const int32_t vi7x1 = (int32_t) i7[1];
i7 += 2;
const int32_t vk7x0 = (int32_t) ((const int8_t*) w)[14];
const int32_t vk7x1 = (int32_t) ((const int8_t*) w)[15];
vacc0 += vi7x0 * vk7x0;
vacc1 += vi7x1 * vk7x1;
const int32_t vi8x0 = (int32_t) i8[0];
const int32_t vi8x1 = (int32_t) i8[1];
i8 += 2;
const int32_t vk8x0 = (int32_t) ((const int8_t*) w)[16];
const int32_t vk8x1 = (int32_t) ((const int8_t*) w)[17];
vacc0 += vi8x0 * vk8x0;
vacc1 += vi8x1 * vk8x1;
w = (const void*) ((uintptr_t) w + 18 * sizeof(int8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
vfpacc0 *= vscale;
vfpacc1 *= vscale;
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0);
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1);
vout0 = math_max_s32(vout0, vmagic_min);
vout1 = math_max_s32(vout1, vmagic_min);
vout0 = math_min_s32(vout0, vmagic_max);
vout1 = math_min_s32(vout1, vmagic_max);
vout0 -= vmagic_bias_less_zero_point;
vout1 -= vmagic_bias_less_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = b[0];
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5;
const int32_t vk5 = (int32_t) ((const int8_t*) w)[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6;
const int32_t vk6 = (int32_t) ((const int8_t*) w)[6];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7;
const int32_t vk7 = (int32_t) ((const int8_t*) w)[7];
vacc += vi7 * vk7;
const int32_t vi8 = (int32_t) *i8;
const int32_t vk8 = (int32_t) ((const int8_t*) w)[8];
vacc += vi8 * vk8;
float vfpacc = (float) vacc * vscale;
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc);
vout = math_max_s32(vout, vmagic_min);
vout = math_min_s32(vout, vmagic_max);
vout -= vmagic_bias_less_zero_point;
*output++ = (int8_t) vout;
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 20,172
| 32.79062
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-8f8m9l2c1s1r-minmax-fp32-scalar-lrintf.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_dwconv_minmax_fp32_ukernel_8f8m9l2c1s1r__scalar_lrintf(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 8);
const float vscale = params->fp32_scalar_lrintf.scale;
const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
do {
const void* w = weights;
// First pass to process 8 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10];
const int32_t vk5x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[11];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
const int32_t vi6x0 = (int32_t) i6[0];
const int32_t vi6x1 = (int32_t) i6[1];
i6 += 2;
const int32_t vk6x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12];
const int32_t vk6x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[13];
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
const int32_t vi7x0 = (int32_t) i7[0];
const int32_t vi7x1 = (int32_t) i7[1];
i7 += 2;
const int32_t vk7x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14];
const int32_t vk7x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[15];
vacc0 += vi7x0 * vk7x0;
vacc1 += vi7x1 * vk7x1;
w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 16 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5;
const int32_t vk5 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6;
const int32_t vk6 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[6];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7;
const int32_t vk7 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[7];
vacc += vi7 * vk7;
w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 8 * sizeof(int8_t));
*b++ = vacc;
}
}
// Middle pass to process 8 inputs in each iteration.
for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) ((const int8_t*) w)[10];
const int32_t vk5x1 = (int32_t) ((const int8_t*) w)[11];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
const int32_t vi6x0 = (int32_t) i6[0];
const int32_t vi6x1 = (int32_t) i6[1];
i6 += 2;
const int32_t vk6x0 = (int32_t) ((const int8_t*) w)[12];
const int32_t vk6x1 = (int32_t) ((const int8_t*) w)[13];
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
const int32_t vi7x0 = (int32_t) i7[0];
const int32_t vi7x1 = (int32_t) i7[1];
i7 += 2;
const int32_t vk7x0 = (int32_t) ((const int8_t*) w)[14];
const int32_t vk7x1 = (int32_t) ((const int8_t*) w)[15];
vacc0 += vi7x0 * vk7x0;
vacc1 += vi7x1 * vk7x1;
w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = b[0];
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5;
const int32_t vk5 = (int32_t) ((const int8_t*) w)[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6;
const int32_t vk6 = (int32_t) ((const int8_t*) w)[6];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7;
const int32_t vk7 = (int32_t) ((const int8_t*) w)[7];
vacc += vi7 * vk7;
w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
*b++ = vacc;
}
}
// Last pass to process up to 9 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
const int8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
}
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
b += 2;
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) ((const int8_t*) w)[10];
const int32_t vk5x1 = (int32_t) ((const int8_t*) w)[11];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
const int32_t vi6x0 = (int32_t) i6[0];
const int32_t vi6x1 = (int32_t) i6[1];
i6 += 2;
const int32_t vk6x0 = (int32_t) ((const int8_t*) w)[12];
const int32_t vk6x1 = (int32_t) ((const int8_t*) w)[13];
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
const int32_t vi7x0 = (int32_t) i7[0];
const int32_t vi7x1 = (int32_t) i7[1];
i7 += 2;
const int32_t vk7x0 = (int32_t) ((const int8_t*) w)[14];
const int32_t vk7x1 = (int32_t) ((const int8_t*) w)[15];
vacc0 += vi7x0 * vk7x0;
vacc1 += vi7x1 * vk7x1;
const int32_t vi8x0 = (int32_t) i8[0];
const int32_t vi8x1 = (int32_t) i8[1];
i8 += 2;
const int32_t vk8x0 = (int32_t) ((const int8_t*) w)[16];
const int32_t vk8x1 = (int32_t) ((const int8_t*) w)[17];
vacc0 += vi8x0 * vk8x0;
vacc1 += vi8x1 * vk8x1;
w = (const void*) ((uintptr_t) w + 18 * sizeof(int8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
vfpacc0 *= vscale;
vfpacc1 *= vscale;
vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
const int32_t vrndacc0 = (int32_t) lrintf(vfpacc0);
const int32_t vrndacc1 = (int32_t) lrintf(vfpacc1);
int32_t vout0 = (int32_t) vrndacc0 + voutput_zero_point;
int32_t vout1 = (int32_t) vrndacc1 + voutput_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = b[0];
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5;
const int32_t vk5 = (int32_t) ((const int8_t*) w)[5];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6;
const int32_t vk6 = (int32_t) ((const int8_t*) w)[6];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7;
const int32_t vk7 = (int32_t) ((const int8_t*) w)[7];
vacc += vi7 * vk7;
const int32_t vi8 = (int32_t) *i8;
const int32_t vk8 = (int32_t) ((const int8_t*) w)[8];
vacc += vi8 * vk8;
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
const int32_t vrndacc = (int32_t) lrintf(vfpacc);
int32_t vout = vrndacc + voutput_zero_point;
*output++ = (int8_t) vout;
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 20,245
| 33.141653
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-8f8m9l2c1s1r-minmax-fp32-wasm-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// Multi-pass QS8 depthwise-convolution microkernel (scalar code built with
// WebAssembly builtins): 8 kernel taps in the first pass, 8 taps per middle
// pass, up to 9 taps in the last pass ("8f8m9l"), processing 2 channels per
// main-loop iteration ("2c1s1r"). Partial int32 sums are staged in `buffer`
// between passes; the last pass requantizes with a fp32 scale, clamps, and
// converts float->int via the "magic bias" bit trick ("fmagic").
//
// Arguments:
//   channels         - number of channels (> 0)
//   output_width     - number of output pixels to produce (> 0)
//   input            - array of per-tap input row pointers
//   weights          - packed weights: per-channel int32 biases followed by
//                      int8 taps (layout consumed incrementally below)
//   output           - quantized int8 output
//   input_stride     - byte stride between the pointer groups of consecutive
//                      output pixels
//   output_increment - byte adjustment applied to `output` after each pixel
//   input_offset     - byte offset added to every non-`zero` input pointer
//   zero             - pointer to the zero-padding buffer (never offset)
//   kernel_size      - total number of taps (> 8, so at least one pass
//                      beyond the first is always taken)
//   buffer           - scratch area holding one int32 accumulator per channel
//   params           - requantization parameters (fp32_scalar_fmagic variant)
void xnn_qs8_dwconv_minmax_fp32_ukernel_8f8m9l2c1s1r__wasm_fmagic(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);
  assert(kernel_size > 8);
  // Requantization constants: fp32 scale, clamping bounds pre-biased by the
  // output zero point, and the magic-bias pair used for float->int conversion.
  const float vscale = params->fp32_scalar_fmagic.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
  const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
  const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
  do {
    const void* w = weights;
    // First pass to process 8 inputs.
    {
      int32_t* b = buffer;
      // Set up the 8 input row pointers for this pass. Rows equal to `zero`
      // point at the shared padding buffer and must not be offset.
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      const int8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      }
      const int8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
      }
      const int8_t* i7 = input[7];
      assert(i7 != NULL);
      if XNN_UNPREDICTABLE(i7 != zero) {
        i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
      }
      input += 8;
      size_t c = channels;
      for (; c >= 2; c -= 2) {
        // For each channel pair the packed weights begin with two int32
        // biases (loaded unaligned), followed by the int8 taps interleaved
        // per channel: tap k for the pair lives at byte indices 2k, 2k+1.
        int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
        int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
        const int32_t vi0x0 = (int32_t) i0[0];
        const int32_t vi0x1 = (int32_t) i0[1];
        i0 += 2;
        const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
        const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1];
        vacc0 += vi0x0 * vk0x0;
        vacc1 += vi0x1 * vk0x1;
        const int32_t vi1x0 = (int32_t) i1[0];
        const int32_t vi1x1 = (int32_t) i1[1];
        i1 += 2;
        const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
        const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3];
        vacc0 += vi1x0 * vk1x0;
        vacc1 += vi1x1 * vk1x1;
        const int32_t vi2x0 = (int32_t) i2[0];
        const int32_t vi2x1 = (int32_t) i2[1];
        i2 += 2;
        const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
        const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5];
        vacc0 += vi2x0 * vk2x0;
        vacc1 += vi2x1 * vk2x1;
        const int32_t vi3x0 = (int32_t) i3[0];
        const int32_t vi3x1 = (int32_t) i3[1];
        i3 += 2;
        const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6];
        const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7];
        vacc0 += vi3x0 * vk3x0;
        vacc1 += vi3x1 * vk3x1;
        const int32_t vi4x0 = (int32_t) i4[0];
        const int32_t vi4x1 = (int32_t) i4[1];
        i4 += 2;
        const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8];
        const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9];
        vacc0 += vi4x0 * vk4x0;
        vacc1 += vi4x1 * vk4x1;
        const int32_t vi5x0 = (int32_t) i5[0];
        const int32_t vi5x1 = (int32_t) i5[1];
        i5 += 2;
        const int32_t vk5x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10];
        const int32_t vk5x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[11];
        vacc0 += vi5x0 * vk5x0;
        vacc1 += vi5x1 * vk5x1;
        const int32_t vi6x0 = (int32_t) i6[0];
        const int32_t vi6x1 = (int32_t) i6[1];
        i6 += 2;
        const int32_t vk6x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12];
        const int32_t vk6x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[13];
        vacc0 += vi6x0 * vk6x0;
        vacc1 += vi6x1 * vk6x1;
        const int32_t vi7x0 = (int32_t) i7[0];
        const int32_t vi7x1 = (int32_t) i7[1];
        i7 += 2;
        const int32_t vk7x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14];
        const int32_t vk7x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[15];
        vacc0 += vi7x0 * vk7x0;
        vacc1 += vi7x1 * vk7x1;
        // Advance past the 2 biases and 16 taps consumed for this pair.
        w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 16 * sizeof(int8_t));
        // Stash the partial sums for the next pass.
        b[0] = vacc0;
        b[1] = vacc1;
        b += 2;
      }
      // Remainder: a single trailing channel when `channels` is odd
      // (one int32 bias followed by 8 int8 taps).
      if XNN_UNLIKELY(c != 0) {
        int32_t vacc = unaligned_load_s32(w);
        const int32_t vi0 = (int32_t) *i0;
        const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0];
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) *i1;
        const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1];
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) *i2;
        const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2];
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) *i3;
        const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3];
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) *i4;
        const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4];
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) *i5;
        const int32_t vk5 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[5];
        vacc += vi5 * vk5;
        const int32_t vi6 = (int32_t) *i6;
        const int32_t vk6 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[6];
        vacc += vi6 * vk6;
        const int32_t vi7 = (int32_t) *i7;
        const int32_t vk7 = (int32_t) ((const int8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[7];
        vacc += vi7 * vk7;
        w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 8 * sizeof(int8_t));
        *b++ = vacc;
      }
    }
    // Middle pass to process 8 inputs in each iteration.
    // Same structure as the first pass, except accumulators are reloaded from
    // `buffer` (no biases in the weights here) and the packed weights contain
    // taps only. Runs while more than 9 taps remain for the last pass.
    for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
      int32_t* b = buffer;
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      const int8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      }
      const int8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
      }
      const int8_t* i7 = input[7];
      assert(i7 != NULL);
      if XNN_UNPREDICTABLE(i7 != zero) {
        i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
      }
      input += 8;
      size_t c = channels;
      for (; c >= 2; c -= 2) {
        // Resume from the partial sums produced by the previous pass.
        int32_t vacc0 = b[0];
        int32_t vacc1 = b[1];
        const int32_t vi0x0 = (int32_t) i0[0];
        const int32_t vi0x1 = (int32_t) i0[1];
        i0 += 2;
        const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
        const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
        vacc0 += vi0x0 * vk0x0;
        vacc1 += vi0x1 * vk0x1;
        const int32_t vi1x0 = (int32_t) i1[0];
        const int32_t vi1x1 = (int32_t) i1[1];
        i1 += 2;
        const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[2];
        const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[3];
        vacc0 += vi1x0 * vk1x0;
        vacc1 += vi1x1 * vk1x1;
        const int32_t vi2x0 = (int32_t) i2[0];
        const int32_t vi2x1 = (int32_t) i2[1];
        i2 += 2;
        const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[4];
        const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[5];
        vacc0 += vi2x0 * vk2x0;
        vacc1 += vi2x1 * vk2x1;
        const int32_t vi3x0 = (int32_t) i3[0];
        const int32_t vi3x1 = (int32_t) i3[1];
        i3 += 2;
        const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[6];
        const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[7];
        vacc0 += vi3x0 * vk3x0;
        vacc1 += vi3x1 * vk3x1;
        const int32_t vi4x0 = (int32_t) i4[0];
        const int32_t vi4x1 = (int32_t) i4[1];
        i4 += 2;
        const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[8];
        const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[9];
        vacc0 += vi4x0 * vk4x0;
        vacc1 += vi4x1 * vk4x1;
        const int32_t vi5x0 = (int32_t) i5[0];
        const int32_t vi5x1 = (int32_t) i5[1];
        i5 += 2;
        const int32_t vk5x0 = (int32_t) ((const int8_t*) w)[10];
        const int32_t vk5x1 = (int32_t) ((const int8_t*) w)[11];
        vacc0 += vi5x0 * vk5x0;
        vacc1 += vi5x1 * vk5x1;
        const int32_t vi6x0 = (int32_t) i6[0];
        const int32_t vi6x1 = (int32_t) i6[1];
        i6 += 2;
        const int32_t vk6x0 = (int32_t) ((const int8_t*) w)[12];
        const int32_t vk6x1 = (int32_t) ((const int8_t*) w)[13];
        vacc0 += vi6x0 * vk6x0;
        vacc1 += vi6x1 * vk6x1;
        const int32_t vi7x0 = (int32_t) i7[0];
        const int32_t vi7x1 = (int32_t) i7[1];
        i7 += 2;
        const int32_t vk7x0 = (int32_t) ((const int8_t*) w)[14];
        const int32_t vk7x1 = (int32_t) ((const int8_t*) w)[15];
        vacc0 += vi7x0 * vk7x0;
        vacc1 += vi7x1 * vk7x1;
        w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
        b[0] = vacc0;
        b[1] = vacc1;
        b += 2;
      }
      if XNN_UNLIKELY(c != 0) {
        int32_t vacc = b[0];
        const int32_t vi0 = (int32_t) *i0;
        const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) *i1;
        const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) *i2;
        const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) *i3;
        const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) *i4;
        const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) *i5;
        const int32_t vk5 = (int32_t) ((const int8_t*) w)[5];
        vacc += vi5 * vk5;
        const int32_t vi6 = (int32_t) *i6;
        const int32_t vk6 = (int32_t) ((const int8_t*) w)[6];
        vacc += vi6 * vk6;
        const int32_t vi7 = (int32_t) *i7;
        const int32_t vk7 = (int32_t) ((const int8_t*) w)[7];
        vacc += vi7 * vk7;
        w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        *b++ = vacc;
      }
    }
    // Last pass to process up to 9 inputs.
    // Consumes the remaining taps (9 pointers are set up; `kernel_size` and
    // the middle-pass loop guarantee 2..9 remain), then requantizes and
    // stores the int8 outputs.
    {
      const int32_t* b = buffer;
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      const int8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      }
      const int8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
      }
      const int8_t* i7 = input[7];
      assert(i7 != NULL);
      if XNN_UNPREDICTABLE(i7 != zero) {
        i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
      }
      const int8_t* i8 = input[8];
      assert(i8 != NULL);
      if XNN_UNPREDICTABLE(i8 != zero) {
        i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
      }
      size_t c = channels;
      for (; c >= 2; c -= 2) {
        int32_t vacc0 = b[0];
        int32_t vacc1 = b[1];
        b += 2;
        const int32_t vi0x0 = (int32_t) i0[0];
        const int32_t vi0x1 = (int32_t) i0[1];
        i0 += 2;
        const int32_t vk0x0 = (int32_t) ((const int8_t*) w)[0];
        const int32_t vk0x1 = (int32_t) ((const int8_t*) w)[1];
        vacc0 += vi0x0 * vk0x0;
        vacc1 += vi0x1 * vk0x1;
        const int32_t vi1x0 = (int32_t) i1[0];
        const int32_t vi1x1 = (int32_t) i1[1];
        i1 += 2;
        const int32_t vk1x0 = (int32_t) ((const int8_t*) w)[2];
        const int32_t vk1x1 = (int32_t) ((const int8_t*) w)[3];
        vacc0 += vi1x0 * vk1x0;
        vacc1 += vi1x1 * vk1x1;
        const int32_t vi2x0 = (int32_t) i2[0];
        const int32_t vi2x1 = (int32_t) i2[1];
        i2 += 2;
        const int32_t vk2x0 = (int32_t) ((const int8_t*) w)[4];
        const int32_t vk2x1 = (int32_t) ((const int8_t*) w)[5];
        vacc0 += vi2x0 * vk2x0;
        vacc1 += vi2x1 * vk2x1;
        const int32_t vi3x0 = (int32_t) i3[0];
        const int32_t vi3x1 = (int32_t) i3[1];
        i3 += 2;
        const int32_t vk3x0 = (int32_t) ((const int8_t*) w)[6];
        const int32_t vk3x1 = (int32_t) ((const int8_t*) w)[7];
        vacc0 += vi3x0 * vk3x0;
        vacc1 += vi3x1 * vk3x1;
        const int32_t vi4x0 = (int32_t) i4[0];
        const int32_t vi4x1 = (int32_t) i4[1];
        i4 += 2;
        const int32_t vk4x0 = (int32_t) ((const int8_t*) w)[8];
        const int32_t vk4x1 = (int32_t) ((const int8_t*) w)[9];
        vacc0 += vi4x0 * vk4x0;
        vacc1 += vi4x1 * vk4x1;
        const int32_t vi5x0 = (int32_t) i5[0];
        const int32_t vi5x1 = (int32_t) i5[1];
        i5 += 2;
        const int32_t vk5x0 = (int32_t) ((const int8_t*) w)[10];
        const int32_t vk5x1 = (int32_t) ((const int8_t*) w)[11];
        vacc0 += vi5x0 * vk5x0;
        vacc1 += vi5x1 * vk5x1;
        const int32_t vi6x0 = (int32_t) i6[0];
        const int32_t vi6x1 = (int32_t) i6[1];
        i6 += 2;
        const int32_t vk6x0 = (int32_t) ((const int8_t*) w)[12];
        const int32_t vk6x1 = (int32_t) ((const int8_t*) w)[13];
        vacc0 += vi6x0 * vk6x0;
        vacc1 += vi6x1 * vk6x1;
        const int32_t vi7x0 = (int32_t) i7[0];
        const int32_t vi7x1 = (int32_t) i7[1];
        i7 += 2;
        const int32_t vk7x0 = (int32_t) ((const int8_t*) w)[14];
        const int32_t vk7x1 = (int32_t) ((const int8_t*) w)[15];
        vacc0 += vi7x0 * vk7x0;
        vacc1 += vi7x1 * vk7x1;
        const int32_t vi8x0 = (int32_t) i8[0];
        const int32_t vi8x1 = (int32_t) i8[1];
        i8 += 2;
        const int32_t vk8x0 = (int32_t) ((const int8_t*) w)[16];
        const int32_t vk8x1 = (int32_t) ((const int8_t*) w)[17];
        vacc0 += vi8x0 * vk8x0;
        vacc1 += vi8x1 * vk8x1;
        w = (const void*) ((uintptr_t) w + 18 * sizeof(int8_t));
        // Requantize: scale in fp32, clamp against bounds that already have
        // the output zero point subtracted, then add the magic bias so the
        // rounded integer can be recovered from the float's bit pattern.
        float vfpacc0 = (float) vacc0;
        float vfpacc1 = (float) vacc1;
        vfpacc0 *= vscale;
        vfpacc1 *= vscale;
        vfpacc0 = __builtin_wasm_max_f32(vfpacc0, voutput_min_less_zero_point);
        vfpacc1 = __builtin_wasm_max_f32(vfpacc1, voutput_min_less_zero_point);
        vfpacc0 = __builtin_wasm_min_f32(vfpacc0, voutput_max_less_zero_point);
        vfpacc1 = __builtin_wasm_min_f32(vfpacc1, voutput_max_less_zero_point);
        vfpacc0 += vmagic_bias;
        vfpacc1 += vmagic_bias;
        // Subtracting (magic_bias_bits - zero_point) re-centers the result
        // on the output zero point in one step.
        int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
        int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
        output[0] = (int8_t) vout0;
        output[1] = (int8_t) vout1;
        output += 2;
      }
      // Remainder: final odd channel, same math for a single lane.
      if XNN_UNLIKELY(c != 0) {
        int32_t vacc = b[0];
        const int32_t vi0 = (int32_t) *i0;
        const int32_t vk0 = (int32_t) ((const int8_t*) w)[0];
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) *i1;
        const int32_t vk1 = (int32_t) ((const int8_t*) w)[1];
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) *i2;
        const int32_t vk2 = (int32_t) ((const int8_t*) w)[2];
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) *i3;
        const int32_t vk3 = (int32_t) ((const int8_t*) w)[3];
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) *i4;
        const int32_t vk4 = (int32_t) ((const int8_t*) w)[4];
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) *i5;
        const int32_t vk5 = (int32_t) ((const int8_t*) w)[5];
        vacc += vi5 * vk5;
        const int32_t vi6 = (int32_t) *i6;
        const int32_t vk6 = (int32_t) ((const int8_t*) w)[6];
        vacc += vi6 * vk6;
        const int32_t vi7 = (int32_t) *i7;
        const int32_t vk7 = (int32_t) ((const int8_t*) w)[7];
        vacc += vi7 * vk7;
        const int32_t vi8 = (int32_t) *i8;
        const int32_t vk8 = (int32_t) ((const int8_t*) w)[8];
        vacc += vi8 * vk8;
        float vfpacc = (float) vacc * vscale;
        vfpacc = __builtin_wasm_max_f32(vfpacc, voutput_min_less_zero_point);
        vfpacc = __builtin_wasm_min_f32(vfpacc, voutput_max_less_zero_point);
        vfpacc += vmagic_bias;
        int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
        *output++ = (int8_t) vout;
      }
    }
    // Advance to the pointer group and output location of the next pixel.
    input = (const int8_t**) ((uintptr_t) input + input_stride);
    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 20,407
| 33.41484
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-8f8m9l8c8s8r-minmax-fp32-neon-mul16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-neon-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_qs8_dwconv_minmax_fp32_ukernel_8f8m9l8c8s8r__neon_mul16(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 8);
const float32x4_t vscale = vld1q_dup_f32(¶ms->fp32_neon.scale);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->fp32_neon.magic_bias);
const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(¶ms->fp32_neon.magic_bias_less_output_zero_point);
const int8x8_t voutput_min = vld1_dup_s8(¶ms->fp32_neon.output_min);
const int8x8_t voutput_max = vld1_dup_s8(¶ms->fp32_neon.output_max);
do {
const void* w = weights;
// First pass to process 8 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5)); i5 += 8;
const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
const int16x8_t vi6x01234567 = vmovl_s8(vld1_s8(i6)); i6 += 8;
const int16x8_t vk6x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
const int16x8_t vi7x01234567 = vmovl_s8(vld1_s8(i7)); i7 += 8;
const int16x8_t vk7x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
assert(c == 0);
}
// Middle pass to process 8 inputs in each iteration.
for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5)); i5 += 8;
const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
const int16x8_t vi6x01234567 = vmovl_s8(vld1_s8(i6)); i6 += 8;
const int16x8_t vk6x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
const int16x8_t vi7x01234567 = vmovl_s8(vld1_s8(i7)); i7 += 8;
const int16x8_t vk7x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
assert(c == 0);
}
// Last pass to process up to 9 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
const int8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5)); i5 += 8;
const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
const int16x8_t vi6x01234567 = vmovl_s8(vld1_s8(i6)); i6 += 8;
const int16x8_t vk6x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
const int16x8_t vi7x01234567 = vmovl_s8(vld1_s8(i7)); i7 += 8;
const int16x8_t vk7x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));
const int16x8_t vi8x01234567 = vmovl_s8(vld1_s8(i8)); i8 += 8;
const int16x8_t vk8x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi8x01234567), vget_low_s16(vk8x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi8x01234567), vget_high_s16(vk8x01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#endif // !XNN_ARCH_ARM64
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
vst1_s8(output, vout01234567); output += 8;
}
if XNN_UNLIKELY(c != 0) {
{
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0));
const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1));
const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2));
const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3));
const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4));
const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5));
const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
const int16x8_t vi6x01234567 = vmovl_s8(vld1_s8(i6));
const int16x8_t vk6x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
const int16x8_t vi7x01234567 = vmovl_s8(vld1_s8(i7));
const int16x8_t vk7x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));
const int16x8_t vi8x01234567 = vmovl_s8(vld1_s8(i8));
const int16x8_t vk8x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi8x01234567), vget_low_s16(vk8x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi8x01234567), vget_high_s16(vk8x01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
if (c & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (c & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (c & 1) {
vst1_lane_s8(output, vout01234567, 0); output += 1;
}
}
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 21,705
| 42.498998
| 123
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-8f8m9l8c8s8r-minmax-fp32-neonv8-mul16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-neon-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
// QS8 depthwise-convolution microkernel with fp32 requantization, using
// ARMv8 NEON round-to-nearest conversion (vcvtnq_s32_f32) and 16-bit
// widening multiplies (mul16).
//
// Multipass structure ("8f8m9l"): a first pass seeds the int32 `buffer`
// with bias + the first 8 kernel taps, zero or more middle passes each
// accumulate 8 more taps into `buffer`, and a last pass adds the final
// (up to 9) taps, requantizes, clamps and stores int8 output.
// Channels are processed in groups of 8 ("8c8s8r").
//
// NOTE(review): the `&params->fp32_neonv8.*` expressions below had been
// corrupted to `¶ms->...` by HTML-entity mangling ("&para" -> U+00B6),
// which does not compile; restored to `&params->...` to match the scalar
// variant visible earlier in this file.
void xnn_qs8_dwconv_minmax_fp32_ukernel_8f8m9l8c8s8r__neonv8_mul16(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);
  assert(kernel_size > 8);  // smaller kernels are handled by single-pass variants
  // Requantization constants, broadcast across all vector lanes.
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
  const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neonv8.output_min);
  const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neonv8.output_max);
  do {
    const void* w = weights;

    // First pass to process 8 inputs.
    {
      int32_t* b = buffer;
      // Rows that point at the shared `zero` buffer must not be offset.
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      const int8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      }
      const int8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
      }
      const int8_t* i7 = input[7];
      assert(i7 != NULL);
      if XNN_UNPREDICTABLE(i7 != zero) {
        i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
      }
      input += 8;

      // Channels rounded up: `buffer` always holds whole 8-channel groups.
      size_t c = round_up_po2(channels, 8);
      for (; c >= 8; c -= 8) {
        // Seed the accumulators with the bias stored at the head of `w`.
        int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
        int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;

        // Widen int8 inputs/weights to int16 and multiply-accumulate.
        const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
        const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
        const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
        const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
        const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
        const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
        const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
        const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
        const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
        const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
        const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5)); i5 += 8;
        const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
        const int16x8_t vi6x01234567 = vmovl_s8(vld1_s8(i6)); i6 += 8;
        const int16x8_t vk6x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
        const int16x8_t vi7x01234567 = vmovl_s8(vld1_s8(i7)); i7 += 8;
        const int16x8_t vk7x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));

        // Spill partial sums for this channel group.
        vst1q_s32(b, vacc0123); b += 4;
        vst1q_s32(b, vacc4567); b += 4;
      }
      assert(c == 0);
    }

    // Middle pass to process 8 inputs in each iteration.
    // Runs while more than 9 taps remain (the last pass takes up to 9).
    for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
      int32_t* b = buffer;
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      const int8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      }
      const int8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
      }
      const int8_t* i7 = input[7];
      assert(i7 != NULL);
      if XNN_UNPREDICTABLE(i7 != zero) {
        i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
      }
      input += 8;

      size_t c = round_up_po2(channels, 8);
      for (; c >= 8; c -= 8) {
        // Resume from the partial sums written by the previous pass.
        int32x4_t vacc0123 = vld1q_s32(b);
        int32x4_t vacc4567 = vld1q_s32(b + 4);

        const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
        const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
        const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
        const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
        const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
        const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
        const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
        const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
        const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
        const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
        const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5)); i5 += 8;
        const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
        const int16x8_t vi6x01234567 = vmovl_s8(vld1_s8(i6)); i6 += 8;
        const int16x8_t vk6x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
        const int16x8_t vi7x01234567 = vmovl_s8(vld1_s8(i7)); i7 += 8;
        const int16x8_t vk7x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));

        vst1q_s32(b, vacc0123); b += 4;
        vst1q_s32(b, vacc4567); b += 4;
      }
      assert(c == 0);
    }

    // Last pass to process up to 9 inputs.
    {
      const int32_t* b = buffer;
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      const int8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      }
      const int8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
      }
      const int8_t* i7 = input[7];
      assert(i7 != NULL);
      if XNN_UNPREDICTABLE(i7 != zero) {
        i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
      }
      const int8_t* i8 = input[8];
      assert(i8 != NULL);
      if XNN_UNPREDICTABLE(i8 != zero) {
        i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
      }

      size_t c = channels;
      for (; c >= 8; c -= 8) {
        int32x4_t vacc0123 = vld1q_s32(b); b += 4;
        int32x4_t vacc4567 = vld1q_s32(b); b += 4;

        const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
        const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
        const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
        const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
        const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
        const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
        const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
        const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
        const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
        const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
        const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5)); i5 += 8;
        const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
        const int16x8_t vi6x01234567 = vmovl_s8(vld1_s8(i6)); i6 += 8;
        const int16x8_t vk6x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
        const int16x8_t vi7x01234567 = vmovl_s8(vld1_s8(i7)); i7 += 8;
        const int16x8_t vk7x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));
        const int16x8_t vi8x01234567 = vmovl_s8(vld1_s8(i8)); i8 += 8;
        const int16x8_t vk8x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi8x01234567), vget_low_s16(vk8x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi8x01234567), vget_high_s16(vk8x01234567));

        // Requantize: int32 -> float, scale, round-to-nearest-even back to
        // int32 (ARMv8 vcvtnq), narrow with saturation, add zero point.
        float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
        float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);

        vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
        vfpacc4567 = vmulq_f32(vfpacc4567, vscale);

        vacc0123 = vcvtnq_s32_f32(vfpacc0123);
        vacc4567 = vcvtnq_s32_f32(vfpacc4567);

#if XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);

        vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);

        int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#else  // !XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));

        vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);

        int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#endif  // !XNN_ARCH_ARM64

        // Clamp to the output range and store 8 channels.
        vout01234567 = vmax_s8(vout01234567, voutput_min);
        vout01234567 = vmin_s8(vout01234567, voutput_max);

        vst1_s8(output, vout01234567); output += 8;
      }
      if XNN_UNLIKELY(c != 0) {
        // Remainder (1..7 channels): full 8-lane computation (reads past the
        // channel count are allowed via XNN_OOB_READS), partial store below.
        {
          int32x4_t vacc0123 = vld1q_s32(b); b += 4;
          int32x4_t vacc4567 = vld1q_s32(b); b += 4;

          const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0));
          const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
          const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1));
          const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
          const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2));
          const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
          const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3));
          const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
          const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4));
          const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
          const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5));
          const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
          const int16x8_t vi6x01234567 = vmovl_s8(vld1_s8(i6));
          const int16x8_t vk6x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
          const int16x8_t vi7x01234567 = vmovl_s8(vld1_s8(i7));
          const int16x8_t vk7x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));
          const int16x8_t vi8x01234567 = vmovl_s8(vld1_s8(i8));
          const int16x8_t vk8x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;

          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi8x01234567), vget_low_s16(vk8x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi8x01234567), vget_high_s16(vk8x01234567));

          float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
          float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);

          vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
          vfpacc4567 = vmulq_f32(vfpacc4567, vscale);

          vacc0123 = vcvtnq_s32_f32(vfpacc0123);
          vacc4567 = vcvtnq_s32_f32(vfpacc4567);

#if XNN_ARCH_ARM64
          int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
          int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
          vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
          int8x8_t vout01234567 = vqmovn_s16(vacc01234567);

          vout01234567 = vmax_s8(vout01234567, voutput_min);
          vout01234567 = vmin_s8(vout01234567, voutput_max);

          // Store 4, then 2, then 1 byte(s) based on the remainder bits.
          if (c & 4) {
            vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
            vout01234567 = vext_s8(vout01234567, vout01234567, 4);
          }
          if (c & 2) {
            vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
            vout01234567 = vext_s8(vout01234567, vout01234567, 2);
          }
          if (c & 1) {
            vst1_lane_s8(output, vout01234567, 0); output += 1;
          }
        }
      }
    }

    // Advance to the next output pixel's row-pointer set and output location.
    input = (const int8_t**) ((uintptr_t) input + input_stride);
    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 21,415
| 42.177419
| 99
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-8f8m9l8c8s8r-minmax-rndnu-neon-mla8-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-neon-mul8.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_qs8_dwconv_minmax_rndnu_ukernel_8f8m9l8c8s8r__neon_mla8_ld64(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 8);
const int32x4_t vright_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
const int8x8_t voutput_min = vld1_dup_s8(¶ms->rndnu_neon.output_min);
const int8x8_t voutput_max = vld1_dup_s8(¶ms->rndnu_neon.output_max);
do {
const void* w = weights;
// First pass to process 8 inputs.
{
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi5x01234567, vk5x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
const int8x8_t vk6x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi6x01234567, vk6x01234567);
const int8x8_t vi7x01234567 = vld1_s8(i7); i7 += 8;
const int8x8_t vk7x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi7x01234567, vk7x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
assert(c == 0);
}
// Middle pass to process 8 inputs in each iteration.
for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi5x01234567, vk5x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
const int8x8_t vk6x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi6x01234567, vk6x01234567);
const int8x8_t vi7x01234567 = vld1_s8(i7); i7 += 8;
const int8x8_t vk7x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi7x01234567, vk7x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
assert(c == 0);
}
// Last pass to process up to 9 inputs.
{
const int32_t* b = buffer;
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
const int8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi5x01234567, vk5x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
const int8x8_t vk6x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi6x01234567, vk6x01234567);
const int8x8_t vi7x01234567 = vld1_s8(i7); i7 += 8;
const int8x8_t vk7x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi7x01234567, vk7x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi8x01234567 = vld1_s8(i8); i8 += 8;
const int8x8_t vk8x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi8x01234567, vk8x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc0123 = vqshlq_s32(vacc0123, vright_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vright_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#endif // !XNN_ARCH_ARM64
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
vst1_s8(output, vout01234567); output += 8;
}
if XNN_UNLIKELY(c != 0) {
{
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int8x8_t vi0x01234567 = vld1_s8(i0);
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
const int8x8_t vi1x01234567 = vld1_s8(i1);
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2);
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3);
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4);
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5);
const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi5x01234567, vk5x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi6x01234567 = vld1_s8(i6);
const int8x8_t vk6x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi6x01234567, vk6x01234567);
const int8x8_t vi7x01234567 = vld1_s8(i7);
const int8x8_t vk7x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi7x01234567, vk7x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi8x01234567 = vld1_s8(i8);
const int8x8_t vk8x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi8x01234567, vk8x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc0123 = vqshlq_s32(vacc0123, vright_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vright_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
if (c & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (c & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (c & 1) {
vst1_lane_s8(output, vout01234567, 0); output += 1;
}
}
}
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 19,200
| 35.573333
| 94
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-8f8m9l8c8s8r-minmax-rndnu-neon-mul16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-neon-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
// Multipass QS8 depthwise convolution microkernel, rndnu requantization,
// NEON MUL16 variant (widen int8 -> int16 with vmovl_s8, accumulate with
// vmlal_s16). Kernel taps are consumed in passes: a first pass of 8 taps
// writes raw int32 accumulators to `buffer`, zero or more middle passes of
// 8 taps accumulate into `buffer`, and a last pass of up to 9 taps finishes
// accumulation, requantizes, clamps, and stores int8 output. Channel tile
// is 8 with an 8-channel buffer stride/remainder (c8s8r).
//
// NOTE(fix): the extracted text had `&params` mangled into the HTML-entity
// residue `¶ms` (pilcrow) in the six parameter-load lines below; restored.
void xnn_qs8_dwconv_minmax_rndnu_ukernel_8f8m9l8c8s8r__neon_mul16(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);
  assert(kernel_size > 8);  // multipass kernel: first pass alone handles 8 taps

  // Broadcast the rndnu requantization parameters once per call.
  const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
  const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
  const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
  const int8x8_t voutput_min = vld1_dup_s8(&params->rndnu_neon.output_min);
  const int8x8_t voutput_max = vld1_dup_s8(&params->rndnu_neon.output_max);
  do {
    const void* w = weights;

    // First pass to process 8 inputs: initialize accumulators from the bias
    // embedded in the packed weights and spill them to `buffer`.
    {
      int32_t* b = buffer;
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      const int8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      }
      const int8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
      }
      const int8_t* i7 = input[7];
      assert(i7 != NULL);
      if XNN_UNPREDICTABLE(i7 != zero) {
        i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
      }
      input += 8;

      // `buffer` is sized in multiples of 8 channels, so iterate over the
      // channel count rounded up to the tile.
      size_t c = round_up_po2(channels, 8);
      for (; c >= 8; c -= 8) {
        int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
        int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;

        const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
        const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
        const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
        const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
        const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
        const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
        const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
        const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
        const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
        const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
        const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5)); i5 += 8;
        const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
        const int16x8_t vi6x01234567 = vmovl_s8(vld1_s8(i6)); i6 += 8;
        const int16x8_t vk6x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
        const int16x8_t vi7x01234567 = vmovl_s8(vld1_s8(i7)); i7 += 8;
        const int16x8_t vk7x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));

        vst1q_s32(b, vacc0123); b += 4;
        vst1q_s32(b, vacc4567); b += 4;
      }
      assert(c == 0);
    }

    // Middle pass to process 8 inputs in each iteration: read partial sums
    // from `buffer`, accumulate 8 more taps, write them back in place.
    for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
      int32_t* b = buffer;
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      const int8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      }
      const int8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
      }
      const int8_t* i7 = input[7];
      assert(i7 != NULL);
      if XNN_UNPREDICTABLE(i7 != zero) {
        i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
      }
      input += 8;

      size_t c = round_up_po2(channels, 8);
      for (; c >= 8; c -= 8) {
        int32x4_t vacc0123 = vld1q_s32(b);
        int32x4_t vacc4567 = vld1q_s32(b + 4);

        const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
        const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
        const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
        const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
        const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
        const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
        const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
        const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
        const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
        const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
        const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5)); i5 += 8;
        const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
        const int16x8_t vi6x01234567 = vmovl_s8(vld1_s8(i6)); i6 += 8;
        const int16x8_t vk6x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
        const int16x8_t vi7x01234567 = vmovl_s8(vld1_s8(i7)); i7 += 8;
        const int16x8_t vk7x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));

        vst1q_s32(b, vacc0123); b += 4;
        vst1q_s32(b, vacc4567); b += 4;
      }
      assert(c == 0);
    }

    // Last pass to process up to 9 inputs: finish accumulation, requantize
    // (rndnu: saturating pre-shift, doubling-high multiply, rounding
    // post-shift), add output zero point, clamp, and store int8 results.
    {
      const int32_t* b = buffer;
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      const int8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      }
      const int8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
      }
      const int8_t* i7 = input[7];
      assert(i7 != NULL);
      if XNN_UNPREDICTABLE(i7 != zero) {
        i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
      }
      const int8_t* i8 = input[8];
      assert(i8 != NULL);
      if XNN_UNPREDICTABLE(i8 != zero) {
        i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
      }

      size_t c = channels;
      for (; c >= 8; c -= 8) {
        int32x4_t vacc0123 = vld1q_s32(b); b += 4;
        int32x4_t vacc4567 = vld1q_s32(b); b += 4;

        const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
        const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
        const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
        const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
        const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
        const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
        const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
        const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
        const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
        const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
        const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5)); i5 += 8;
        const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
        const int16x8_t vi6x01234567 = vmovl_s8(vld1_s8(i6)); i6 += 8;
        const int16x8_t vk6x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
        const int16x8_t vi7x01234567 = vmovl_s8(vld1_s8(i7)); i7 += 8;
        const int16x8_t vk7x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));
        const int16x8_t vi8x01234567 = vmovl_s8(vld1_s8(i8)); i8 += 8;
        const int16x8_t vk8x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi8x01234567), vget_low_s16(vk8x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi8x01234567), vget_high_s16(vk8x01234567));

        // rndnu requantization: pre-shift (saturating), fixed-point multiply,
        // rounding post-shift.
        vacc0123 = vqshlq_s32(vacc0123, vright_pre_shift);
        vacc4567 = vqshlq_s32(vacc4567, vright_pre_shift);

        vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
        vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);

        vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
        vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);

#if XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);

        vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);

        int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#else  // !XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));

        vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);

        int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#endif  // !XNN_ARCH_ARM64

        vout01234567 = vmax_s8(vout01234567, voutput_min);

        vout01234567 = vmin_s8(vout01234567, voutput_max);

        vst1_s8(output, vout01234567); output += 8;
      }
      if XNN_UNLIKELY(c != 0) {
        // Remainder of 1-7 channels: compute a full 8-channel tile (loads may
        // read past the valid data, permitted by XNN_OOB_READS) and store only
        // the low `c` lanes.
        {
          int32x4_t vacc0123 = vld1q_s32(b); b += 4;
          int32x4_t vacc4567 = vld1q_s32(b); b += 4;

          const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0));
          const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
          const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1));
          const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
          const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2));
          const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
          const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3));
          const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
          const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4));
          const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
          const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5));
          const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
          const int16x8_t vi6x01234567 = vmovl_s8(vld1_s8(i6));
          const int16x8_t vk6x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
          const int16x8_t vi7x01234567 = vmovl_s8(vld1_s8(i7));
          const int16x8_t vk7x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));
          const int16x8_t vi8x01234567 = vmovl_s8(vld1_s8(i8));
          const int16x8_t vk8x01234567 = vmovl_s8(vld1_s8(w)); w = (const int8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi8x01234567), vget_low_s16(vk8x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi8x01234567), vget_high_s16(vk8x01234567));

          vacc0123 = vqshlq_s32(vacc0123, vright_pre_shift);
          vacc4567 = vqshlq_s32(vacc4567, vright_pre_shift);

          vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
          vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);

          vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
          vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);

#if XNN_ARCH_ARM64
          int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
          int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
          vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);

          int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
          vout01234567 = vmax_s8(vout01234567, voutput_min);
          vout01234567 = vmin_s8(vout01234567, voutput_max);

          // Store 4/2/1 lanes at a time, rotating the vector between stores.
          if (c & 4) {
            vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
            vout01234567 = vext_s8(vout01234567, vout01234567, 4);
          }
          if (c & 2) {
            vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
            vout01234567 = vext_s8(vout01234567, vout01234567, 2);
          }
          if (c & 1) {
            vst1_lane_s8(output, vout01234567, 0); output += 1;
          }
        }
      }
    }

    // Advance to the next output pixel's indirection entries and output row.
    input = (const int8_t**) ((uintptr_t) input + input_stride);
    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 21,629
| 42.521127
| 99
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-8f8m9l8c8s8r-minmax-rndnu-neon-mul8-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-neon-mul8.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
// Multipass QS8 depthwise-convolution microkernel with rndnu requantization.
// Handles kernels with more than 8 taps: a first pass of 8 taps initializes
// 32-bit partial sums in 'buffer', each middle pass accumulates 8 more taps
// in place, and a last pass of up to 9 taps finishes accumulation and
// requantizes to int8. Channels are processed 8 at a time with vmull_s8
// (hence the XNN_OOB_READS annotation: the channel remainder reads a full
// 8-byte vector past the valid data).
void xnn_qs8_dwconv_minmax_rndnu_ukernel_8f8m9l8c8s8r__neon_mul8_ld64(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);
  assert(kernel_size > 8);
  // rndnu requantization parameters: saturating pre-shift, fixed-point
  // multiplier, rounding post-shift, then output zero point and clamp bounds.
  const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
  const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
  const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
  const int8x8_t voutput_min = vld1_dup_s8(&params->rndnu_neon.output_min);
  const int8x8_t voutput_max = vld1_dup_s8(&params->rndnu_neon.output_max);
  do {
    const void* w = weights;
    // First pass to process 8 inputs.
    {
      int32_t* b = buffer;
      // Input row pointers; rows equal to 'zero' are the shared zero page and
      // must not be offset.
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      const int8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      }
      const int8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
      }
      const int8_t* i7 = input[7];
      assert(i7 != NULL);
      if XNN_UNPREDICTABLE(i7 != zero) {
        i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
      }
      input += 8;
      // Buffer is sized for a multiple of 8 channels, so iterate the rounded-up
      // count; the extra lanes are scratch.
      size_t c = round_up_po2(channels, 8);
      for (; c >= 8; c -= 8) {
        // Start from the per-channel bias stored at the head of the weights.
        int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
        int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
        // For each tap: widening int8 multiply, then widen-accumulate the
        // int16 products into the two int32 accumulators.
        const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
        const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
        const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi1x01234567, vk1x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
        const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
        const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi3x01234567, vk3x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
        const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
        const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi5x01234567, vk5x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
        const int8x8_t vk6x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi6x01234567, vk6x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi7x01234567 = vld1_s8(i7); i7 += 8;
        const int8x8_t vk7x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi7x01234567, vk7x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        // Spill partial sums to the scratch buffer for the next pass.
        vst1q_s32(b, vacc0123); b += 4;
        vst1q_s32(b, vacc4567); b += 4;
      }
      assert(c == 0);
    }
    // Middle pass to process 8 inputs in each iteration.
    // Runs while more than 9 taps remain, so the last pass always has 2-9 taps.
    for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
      int32_t* b = buffer;
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      const int8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      }
      const int8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
      }
      const int8_t* i7 = input[7];
      assert(i7 != NULL);
      if XNN_UNPREDICTABLE(i7 != zero) {
        i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
      }
      input += 8;
      size_t c = round_up_po2(channels, 8);
      for (; c >= 8; c -= 8) {
        // Resume accumulation from the partial sums written by the prior pass.
        int32x4_t vacc0123 = vld1q_s32(b);
        int32x4_t vacc4567 = vld1q_s32(b + 4);
        const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
        const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
        const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi1x01234567, vk1x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
        const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
        const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi3x01234567, vk3x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
        const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
        const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi5x01234567, vk5x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
        const int8x8_t vk6x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi6x01234567, vk6x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi7x01234567 = vld1_s8(i7); i7 += 8;
        const int8x8_t vk7x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi7x01234567, vk7x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        // Write updated partial sums back in place.
        vst1q_s32(b, vacc0123); b += 4;
        vst1q_s32(b, vacc4567); b += 4;
      }
      assert(c == 0);
    }
    // Last pass to process up to 9 inputs.
    {
      const int32_t* b = buffer;
      const int8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
      }
      const int8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
      }
      const int8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
      }
      const int8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
      }
      const int8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
      }
      const int8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
      }
      const int8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
      }
      const int8_t* i7 = input[7];
      assert(i7 != NULL);
      if XNN_UNPREDICTABLE(i7 != zero) {
        i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
      }
      const int8_t* i8 = input[8];
      assert(i8 != NULL);
      if XNN_UNPREDICTABLE(i8 != zero) {
        i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
      }
      // Last pass iterates the exact channel count; remainder handled below.
      size_t c = channels;
      for (; c >= 8; c -= 8) {
        int32x4_t vacc0123 = vld1q_s32(b); b += 4;
        int32x4_t vacc4567 = vld1q_s32(b); b += 4;
        const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
        const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
        const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi1x01234567, vk1x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
        const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
        const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi3x01234567, vk3x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
        const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
        const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi5x01234567, vk5x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
        const int8x8_t vk6x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi6x01234567, vk6x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi7x01234567 = vld1_s8(i7); i7 += 8;
        const int8x8_t vk7x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi7x01234567, vk7x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi8x01234567 = vld1_s8(i8); i8 += 8;
        const int8x8_t vk8x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod01234567 = vmull_s8(vi8x01234567, vk8x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        // rndnu requantization: saturating pre-shift, saturating doubling
        // high-half multiply, then rounding post-shift.
        vacc0123 = vqshlq_s32(vacc0123, vright_pre_shift);
        vacc4567 = vqshlq_s32(vacc4567, vright_pre_shift);
        vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
        vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
        vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
        vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);
        // Saturating narrow to int16, add zero point, narrow to int8.
#if XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
        vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
        int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#else  // !XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
        vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
        int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#endif  // !XNN_ARCH_ARM64
        vout01234567 = vmax_s8(vout01234567, voutput_min);
        vout01234567 = vmin_s8(vout01234567, voutput_max);
        vst1_s8(output, vout01234567); output += 8;
      }
      if XNN_UNLIKELY(c != 0) {
        // Remainder of 1-7 channels: full 8-lane loads (OOB-safe per
        // XNN_OOB_READS), then store only the valid low lanes.
        {
          int32x4_t vacc0123 = vld1q_s32(b); b += 4;
          int32x4_t vacc4567 = vld1q_s32(b); b += 4;
          const int8x8_t vi0x01234567 = vld1_s8(i0);
          const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
          int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
          vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
          vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
          const int8x8_t vi1x01234567 = vld1_s8(i1);
          const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
          vprod01234567 = vmull_s8(vi1x01234567, vk1x01234567);
          vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
          vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
          const int8x8_t vi2x01234567 = vld1_s8(i2);
          const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
          vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
          vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
          vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
          const int8x8_t vi3x01234567 = vld1_s8(i3);
          const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
          vprod01234567 = vmull_s8(vi3x01234567, vk3x01234567);
          vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
          vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
          const int8x8_t vi4x01234567 = vld1_s8(i4);
          const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
          vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
          vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
          vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
          const int8x8_t vi5x01234567 = vld1_s8(i5);
          const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
          vprod01234567 = vmull_s8(vi5x01234567, vk5x01234567);
          vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
          vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
          const int8x8_t vi6x01234567 = vld1_s8(i6);
          const int8x8_t vk6x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
          vprod01234567 = vmull_s8(vi6x01234567, vk6x01234567);
          vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
          vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
          const int8x8_t vi7x01234567 = vld1_s8(i7);
          const int8x8_t vk7x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
          vprod01234567 = vmull_s8(vi7x01234567, vk7x01234567);
          vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
          vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
          const int8x8_t vi8x01234567 = vld1_s8(i8);
          const int8x8_t vk8x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
          vprod01234567 = vmull_s8(vi8x01234567, vk8x01234567);
          vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
          vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
          // Same rndnu requantization sequence as the main loop.
          vacc0123 = vqshlq_s32(vacc0123, vright_pre_shift);
          vacc4567 = vqshlq_s32(vacc4567, vright_pre_shift);
          vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
          vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
          vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
          vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);
#if XNN_ARCH_ARM64
          int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
          int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
          vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
          int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
          vout01234567 = vmax_s8(vout01234567, voutput_min);
          vout01234567 = vmin_s8(vout01234567, voutput_max);
          // Store 4/2/1 valid bytes, rotating the vector after each store.
          if (c & 4) {
            vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
            vout01234567 = vext_s8(vout01234567, vout01234567, 4);
          }
          if (c & 2) {
            vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
            vout01234567 = vext_s8(vout01234567, vout01234567, 2);
          }
          if (c & 1) {
            vst1_lane_s8(output, vout01234567, 0); output += 1;
          }
        }
      }
    }
    // Advance to the next output pixel's input-pointer row and output slot.
    input = (const int8_t**) ((uintptr_t) input + input_stride);
    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 21,200
| 37.062837
| 94
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-9p16c-minmax-fp32-avx-mul16-add16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-sse-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_dwconv_minmax_fp32_ukernel_9p16c__avx_mul16_add16(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
const int8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
size_t c = channels;
const void* w = weights;
for (; c >= 16; c -= 16) {
__m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));
__m128i vacc89AB = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 8));
__m128i vaccCDEF = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 12));
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 0 * sizeof(int8_t)));
const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
const __m128i vi0x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i0 + 8));
const __m128i vxi0x89ABCDEF = _mm_cvtepi8_epi16(vi0x89ABCDEF);
const __m128i vk0x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 8 * sizeof(int8_t)));
const __m128i vxk0x89ABCDEF = _mm_cvtepi8_epi16(vk0x89ABCDEF);
i0 += 16;
__m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
__m128i vprod89ABCDEF = _mm_mullo_epi16(vxi0x89ABCDEF, vxk0x89ABCDEF);
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 16 * sizeof(int8_t)));
const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
const __m128i vi1x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i1 + 8));
const __m128i vxi1x89ABCDEF = _mm_cvtepi8_epi16(vi1x89ABCDEF);
const __m128i vk1x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 24 * sizeof(int8_t)));
const __m128i vxk1x89ABCDEF = _mm_cvtepi8_epi16(vk1x89ABCDEF);
i1 += 16;
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi1x01234567, vxk1x01234567));
vprod89ABCDEF = _mm_add_epi16(vprod89ABCDEF, _mm_mullo_epi16(vxi1x89ABCDEF, vxk1x89ABCDEF));
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 32 * sizeof(int8_t)));
const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
const __m128i vi2x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i2 + 8));
const __m128i vxi2x89ABCDEF = _mm_cvtepi8_epi16(vi2x89ABCDEF);
const __m128i vk2x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 40 * sizeof(int8_t)));
const __m128i vxk2x89ABCDEF = _mm_cvtepi8_epi16(vk2x89ABCDEF);
i2 += 16;
vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
vprod89ABCDEF = _mm_mullo_epi16(vxi2x89ABCDEF, vxk2x89ABCDEF);
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 48 * sizeof(int8_t)));
const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
const __m128i vi3x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i3 + 8));
const __m128i vxi3x89ABCDEF = _mm_cvtepi8_epi16(vi3x89ABCDEF);
const __m128i vk3x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 56 * sizeof(int8_t)));
const __m128i vxk3x89ABCDEF = _mm_cvtepi8_epi16(vk3x89ABCDEF);
i3 += 16;
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi3x01234567, vxk3x01234567));
vprod89ABCDEF = _mm_add_epi16(vprod89ABCDEF, _mm_mullo_epi16(vxi3x89ABCDEF, vxk3x89ABCDEF));
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 64 * sizeof(int8_t)));
const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
const __m128i vi4x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i4 + 8));
const __m128i vxi4x89ABCDEF = _mm_cvtepi8_epi16(vi4x89ABCDEF);
const __m128i vk4x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 72 * sizeof(int8_t)));
const __m128i vxk4x89ABCDEF = _mm_cvtepi8_epi16(vk4x89ABCDEF);
i4 += 16;
vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
vprod89ABCDEF = _mm_mullo_epi16(vxi4x89ABCDEF, vxk4x89ABCDEF);
const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(vi5x01234567);
const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 80 * sizeof(int8_t)));
const __m128i vxk5x01234567 = _mm_cvtepi8_epi16(vk5x01234567);
const __m128i vi5x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i5 + 8));
const __m128i vxi5x89ABCDEF = _mm_cvtepi8_epi16(vi5x89ABCDEF);
const __m128i vk5x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 88 * sizeof(int8_t)));
const __m128i vxk5x89ABCDEF = _mm_cvtepi8_epi16(vk5x89ABCDEF);
i5 += 16;
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi5x01234567, vxk5x01234567));
vprod89ABCDEF = _mm_add_epi16(vprod89ABCDEF, _mm_mullo_epi16(vxi5x89ABCDEF, vxk5x89ABCDEF));
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(vi6x01234567);
const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 96 * sizeof(int8_t)));
const __m128i vxk6x01234567 = _mm_cvtepi8_epi16(vk6x01234567);
const __m128i vi6x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i6 + 8));
const __m128i vxi6x89ABCDEF = _mm_cvtepi8_epi16(vi6x89ABCDEF);
const __m128i vk6x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 104 * sizeof(int8_t)));
const __m128i vxk6x89ABCDEF = _mm_cvtepi8_epi16(vk6x89ABCDEF);
i6 += 16;
vprod01234567 = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);
vprod89ABCDEF = _mm_mullo_epi16(vxi6x89ABCDEF, vxk6x89ABCDEF);
const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
const __m128i vxi7x01234567 = _mm_cvtepi8_epi16(vi7x01234567);
const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 112 * sizeof(int8_t)));
const __m128i vxk7x01234567 = _mm_cvtepi8_epi16(vk7x01234567);
const __m128i vi7x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i7 + 8));
const __m128i vxi7x89ABCDEF = _mm_cvtepi8_epi16(vi7x89ABCDEF);
const __m128i vk7x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 120 * sizeof(int8_t)));
const __m128i vxk7x89ABCDEF = _mm_cvtepi8_epi16(vk7x89ABCDEF);
i7 += 16;
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi7x01234567, vxk7x01234567));
vprod89ABCDEF = _mm_add_epi16(vprod89ABCDEF, _mm_mullo_epi16(vxi7x89ABCDEF, vxk7x89ABCDEF));
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
const __m128i vxi8x01234567 = _mm_cvtepi8_epi16(vi8x01234567);
const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 128 * sizeof(int8_t)));
const __m128i vxk8x01234567 = _mm_cvtepi8_epi16(vk8x01234567);
const __m128i vi8x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i8 + 8));
const __m128i vxi8x89ABCDEF = _mm_cvtepi8_epi16(vi8x89ABCDEF);
const __m128i vk8x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 136 * sizeof(int8_t)));
const __m128i vxk8x89ABCDEF = _mm_cvtepi8_epi16(vk8x89ABCDEF);
i8 += 16;
vprod01234567 = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);
vprod89ABCDEF = _mm_mullo_epi16(vxi8x89ABCDEF, vxk8x89ABCDEF);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
w = (const void*) ((uintptr_t) w + 16 * sizeof(int32_t) + 144 * sizeof(int8_t));
__m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);
__m128 vscaled89AB = _mm_cvtepi32_ps(vacc89AB);
__m128 vscaledCDEF = _mm_cvtepi32_ps(vaccCDEF);
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
vscaled4567 = _mm_mul_ps(vscaled4567, vscale);
vscaled89AB = _mm_mul_ps(vscaled89AB, vscale);
vscaledCDEF = _mm_mul_ps(vscaledCDEF, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);
vscaled89AB = _mm_min_ps(vscaled89AB, voutput_max_less_zero_point);
vscaledCDEF = _mm_min_ps(vscaledCDEF, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vscaled0123);
vacc4567 = _mm_cvtps_epi32(vscaled4567);
vacc89AB = _mm_cvtps_epi32(vscaled89AB);
vaccCDEF = _mm_cvtps_epi32(vscaledCDEF);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse4.output_min);
vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(c != 0) {
const int8_t* k = (const int8_t*) ((const int32_t*) w + 16);
do {
__m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) k);
const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
i0 += 8;
__m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) (k + 16));
const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
i1 += 8;
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi1x01234567, vxk1x01234567));
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) (k + 32));
const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
i2 += 8;
vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) (k + 48));
const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
i3 += 8;
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi3x01234567, vxk3x01234567));
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) (k + 64));
const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
i4 += 8;
vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(vi5x01234567);
const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) (k + 80));
const __m128i vxk5x01234567 = _mm_cvtepi8_epi16(vk5x01234567);
i5 += 8;
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi5x01234567, vxk5x01234567));
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(vi6x01234567);
const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) (k + 96));
const __m128i vxk6x01234567 = _mm_cvtepi8_epi16(vk6x01234567);
i6 += 8;
vprod01234567 = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);
const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
const __m128i vxi7x01234567 = _mm_cvtepi8_epi16(vi7x01234567);
const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) (k + 112));
const __m128i vxk7x01234567 = _mm_cvtepi8_epi16(vk7x01234567);
i7 += 8;
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi7x01234567, vxk7x01234567));
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
const __m128i vxi8x01234567 = _mm_cvtepi8_epi16(vi8x01234567);
const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) (k + 128));
const __m128i vxk8x01234567 = _mm_cvtepi8_epi16(vk8x01234567);
i8 += 8;
vprod01234567 = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
k += 8;
__m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
vscaled4567 = _mm_mul_ps(vscaled4567, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vscaled0123);
vacc4567 = _mm_cvtps_epi32(vscaled4567);
w = (const void*) ((const int32_t*) w + 8);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if XNN_LIKELY(c >= 8) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
c -= 8;
} else {
if (c & 4) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (c & 2) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (c & 1) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
output += 1;
}
c = 0;
}
} while (c != 0);
}
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 21,323
| 47.244344
| 131
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-9p16c-minmax-fp32-avx-mul16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-sse-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/unaligned.h>
// QS8 depthwise-convolution microkernel: 9-tap kernel ("9p"), 16 channels per
// main-loop tile ("16c"), FP32 requantization, AVX implementation built on
// 16-bit multiplies ("mul16").
//
// channels         - number of channels to process per output pixel (> 0)
// output_width     - number of output pixels to produce (> 0)
// input            - per-pixel groups of 9 input-row pointers (taps i0..i8)
// weights          - packed blocks of [16 x int32 bias][9 taps x 16 x int8]
// output           - destination for the quantized int8 results
// input_stride     - byte stride between successive pointer groups in 'input'
// output_increment - byte adjustment applied to 'output' after each pixel
// input_offset     - byte offset added to every input pointer except 'zero'
// zero             - pointer to a zero buffer used for padding rows
// params           - requantization constants; this AVX variant reads the
//                    params->fp32_sse4 layout (scale, max-less-zero-point,
//                    zero point, min)
//
// XNN_OOB_READS: the kernel is allowed to read (never write) a few bytes past
// the ends of its input buffers.
void xnn_qs8_dwconv_minmax_fp32_ukernel_9p16c__avx_mul16(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);
  do {
    // Load the 9 tap pointers for this output pixel. Pointers equal to 'zero'
    // are padding rows and are NOT rebased by input_offset.
    const int8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
    }
    const int8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
    }
    const int8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
    }
    const int8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
    }
    const int8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
    }
    const int8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
    }
    const int8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
    }
    const int8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
    }
    const int8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
    }
    // Advance to the next output pixel's group of tap pointers.
    input = (const int8_t**) ((uintptr_t) input + input_stride);
    size_t c = channels;
    const void* w = weights;
    // Main loop: 16 channels per iteration. Accumulators start from the 16
    // int32 biases at the head of each packed weight block.
    for (; c >= 16; c -= 16) {
      __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
      __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));
      __m128i vacc89AB = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 8));
      __m128i vaccCDEF = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 12));
      // Tap 0 (pattern repeated for taps 1..8 below): load 8+8 int8 inputs and
      // kernel bytes, sign-extend to int16, multiply with _mm_mullo_epi16 —
      // exact, since an int8*int8 product always fits in int16 — then widen the
      // 16-bit products to int32 lanes (_mm_cvtepi16_epi32 for the low half,
      // unpackhi+arithmetic-shift for the high half) and accumulate.
      const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
      const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
      const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 0 * sizeof(int8_t)));
      const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
      const __m128i vi0x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i0 + 8));
      const __m128i vxi0x89ABCDEF = _mm_cvtepi8_epi16(vi0x89ABCDEF);
      const __m128i vk0x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 8 * sizeof(int8_t)));
      const __m128i vxk0x89ABCDEF = _mm_cvtepi8_epi16(vk0x89ABCDEF);
      i0 += 16;
      __m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
      __m128i vprod89ABCDEF = _mm_mullo_epi16(vxi0x89ABCDEF, vxk0x89ABCDEF);
      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
      vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
      vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
      // Tap 1 (kernel bytes at offset 16 past the 16 biases).
      const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
      const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
      const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 16 * sizeof(int8_t)));
      const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
      const __m128i vi1x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i1 + 8));
      const __m128i vxi1x89ABCDEF = _mm_cvtepi8_epi16(vi1x89ABCDEF);
      const __m128i vk1x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 24 * sizeof(int8_t)));
      const __m128i vxk1x89ABCDEF = _mm_cvtepi8_epi16(vk1x89ABCDEF);
      i1 += 16;
      vprod01234567 = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
      vprod89ABCDEF = _mm_mullo_epi16(vxi1x89ABCDEF, vxk1x89ABCDEF);
      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
      vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
      vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
      // Tap 2.
      const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
      const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
      const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 32 * sizeof(int8_t)));
      const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
      const __m128i vi2x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i2 + 8));
      const __m128i vxi2x89ABCDEF = _mm_cvtepi8_epi16(vi2x89ABCDEF);
      const __m128i vk2x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 40 * sizeof(int8_t)));
      const __m128i vxk2x89ABCDEF = _mm_cvtepi8_epi16(vk2x89ABCDEF);
      i2 += 16;
      vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
      vprod89ABCDEF = _mm_mullo_epi16(vxi2x89ABCDEF, vxk2x89ABCDEF);
      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
      vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
      vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
      // Tap 3.
      const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
      const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
      const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 48 * sizeof(int8_t)));
      const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
      const __m128i vi3x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i3 + 8));
      const __m128i vxi3x89ABCDEF = _mm_cvtepi8_epi16(vi3x89ABCDEF);
      const __m128i vk3x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 56 * sizeof(int8_t)));
      const __m128i vxk3x89ABCDEF = _mm_cvtepi8_epi16(vk3x89ABCDEF);
      i3 += 16;
      vprod01234567 = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
      vprod89ABCDEF = _mm_mullo_epi16(vxi3x89ABCDEF, vxk3x89ABCDEF);
      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
      vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
      vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
      // Tap 4.
      const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
      const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
      const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 64 * sizeof(int8_t)));
      const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
      const __m128i vi4x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i4 + 8));
      const __m128i vxi4x89ABCDEF = _mm_cvtepi8_epi16(vi4x89ABCDEF);
      const __m128i vk4x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 72 * sizeof(int8_t)));
      const __m128i vxk4x89ABCDEF = _mm_cvtepi8_epi16(vk4x89ABCDEF);
      i4 += 16;
      vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
      vprod89ABCDEF = _mm_mullo_epi16(vxi4x89ABCDEF, vxk4x89ABCDEF);
      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
      vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
      vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
      // Tap 5.
      const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
      const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(vi5x01234567);
      const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 80 * sizeof(int8_t)));
      const __m128i vxk5x01234567 = _mm_cvtepi8_epi16(vk5x01234567);
      const __m128i vi5x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i5 + 8));
      const __m128i vxi5x89ABCDEF = _mm_cvtepi8_epi16(vi5x89ABCDEF);
      const __m128i vk5x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 88 * sizeof(int8_t)));
      const __m128i vxk5x89ABCDEF = _mm_cvtepi8_epi16(vk5x89ABCDEF);
      i5 += 16;
      vprod01234567 = _mm_mullo_epi16(vxi5x01234567, vxk5x01234567);
      vprod89ABCDEF = _mm_mullo_epi16(vxi5x89ABCDEF, vxk5x89ABCDEF);
      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
      vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
      vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
      // Tap 6.
      const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
      const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(vi6x01234567);
      const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 96 * sizeof(int8_t)));
      const __m128i vxk6x01234567 = _mm_cvtepi8_epi16(vk6x01234567);
      const __m128i vi6x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i6 + 8));
      const __m128i vxi6x89ABCDEF = _mm_cvtepi8_epi16(vi6x89ABCDEF);
      const __m128i vk6x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 104 * sizeof(int8_t)));
      const __m128i vxk6x89ABCDEF = _mm_cvtepi8_epi16(vk6x89ABCDEF);
      i6 += 16;
      vprod01234567 = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);
      vprod89ABCDEF = _mm_mullo_epi16(vxi6x89ABCDEF, vxk6x89ABCDEF);
      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
      vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
      vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
      // Tap 7.
      const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
      const __m128i vxi7x01234567 = _mm_cvtepi8_epi16(vi7x01234567);
      const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 112 * sizeof(int8_t)));
      const __m128i vxk7x01234567 = _mm_cvtepi8_epi16(vk7x01234567);
      const __m128i vi7x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i7 + 8));
      const __m128i vxi7x89ABCDEF = _mm_cvtepi8_epi16(vi7x89ABCDEF);
      const __m128i vk7x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 120 * sizeof(int8_t)));
      const __m128i vxk7x89ABCDEF = _mm_cvtepi8_epi16(vk7x89ABCDEF);
      i7 += 16;
      vprod01234567 = _mm_mullo_epi16(vxi7x01234567, vxk7x01234567);
      vprod89ABCDEF = _mm_mullo_epi16(vxi7x89ABCDEF, vxk7x89ABCDEF);
      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
      vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
      vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
      // Tap 8 (last tap of the 9-point kernel).
      const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
      const __m128i vxi8x01234567 = _mm_cvtepi8_epi16(vi8x01234567);
      const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 128 * sizeof(int8_t)));
      const __m128i vxk8x01234567 = _mm_cvtepi8_epi16(vk8x01234567);
      const __m128i vi8x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i8 + 8));
      const __m128i vxi8x89ABCDEF = _mm_cvtepi8_epi16(vi8x89ABCDEF);
      const __m128i vk8x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 136 * sizeof(int8_t)));
      const __m128i vxk8x89ABCDEF = _mm_cvtepi8_epi16(vk8x89ABCDEF);
      i8 += 16;
      vprod01234567 = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);
      vprod89ABCDEF = _mm_mullo_epi16(vxi8x89ABCDEF, vxk8x89ABCDEF);
      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
      vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
      vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
      // Skip the whole consumed weight block: 16 biases + 9*16 kernel bytes.
      w = (const void*) ((uintptr_t) w + 16 * sizeof(int32_t) + 144 * sizeof(int8_t));
      // FP32 requantization: int32 accumulators -> float, scale, clamp above
      // (the upper clamp also bounds the value before integer conversion),
      // then round-to-nearest back to int32 via _mm_cvtps_epi32.
      __m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
      __m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);
      __m128 vscaled89AB = _mm_cvtepi32_ps(vacc89AB);
      __m128 vscaledCDEF = _mm_cvtepi32_ps(vaccCDEF);
      const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
      vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
      vscaled4567 = _mm_mul_ps(vscaled4567, vscale);
      vscaled89AB = _mm_mul_ps(vscaled89AB, vscale);
      vscaledCDEF = _mm_mul_ps(vscaledCDEF, vscale);
      const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
      vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
      vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);
      vscaled89AB = _mm_min_ps(vscaled89AB, voutput_max_less_zero_point);
      vscaledCDEF = _mm_min_ps(vscaledCDEF, voutput_max_less_zero_point);
      vacc0123 = _mm_cvtps_epi32(vscaled0123);
      vacc4567 = _mm_cvtps_epi32(vscaled4567);
      vacc89AB = _mm_cvtps_epi32(vscaled89AB);
      vaccCDEF = _mm_cvtps_epi32(vscaledCDEF);
      // Add the output zero point with 16-bit saturation, narrow to int8 with
      // saturation, then apply the lower clamp and store all 16 results.
      const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
      __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
      __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
      __m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
      const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse4.output_min);
      vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
      _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
      output += 16;
    }
    // Remainder: up to 15 channels left, processed 8 at a time. 'k' points at
    // the kernel bytes just past the 16 biases; taps remain 16 bytes apart
    // (the packed channel-tile stride) even though only 8 lanes are used.
    if XNN_UNLIKELY(c != 0) {
      const int8_t* k = (const int8_t*) ((const int32_t*) w + 16);
      do {
        __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
        __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));
        // Same mul16 multiply-accumulate pattern as the main loop, one 8-lane
        // group per tap.
        const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
        const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
        const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) k);
        const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
        i0 += 8;
        __m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
        const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
        const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
        const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) (k + 16));
        const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
        i1 += 8;
        vprod01234567 = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
        const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
        const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
        const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) (k + 32));
        const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
        i2 += 8;
        vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
        const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
        const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
        const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) (k + 48));
        const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
        i3 += 8;
        vprod01234567 = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
        const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
        const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
        const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) (k + 64));
        const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
        i4 += 8;
        vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
        const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
        const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(vi5x01234567);
        const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) (k + 80));
        const __m128i vxk5x01234567 = _mm_cvtepi8_epi16(vk5x01234567);
        i5 += 8;
        vprod01234567 = _mm_mullo_epi16(vxi5x01234567, vxk5x01234567);
        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
        const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
        const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(vi6x01234567);
        const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) (k + 96));
        const __m128i vxk6x01234567 = _mm_cvtepi8_epi16(vk6x01234567);
        i6 += 8;
        vprod01234567 = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);
        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
        const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
        const __m128i vxi7x01234567 = _mm_cvtepi8_epi16(vi7x01234567);
        const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) (k + 112));
        const __m128i vxk7x01234567 = _mm_cvtepi8_epi16(vk7x01234567);
        i7 += 8;
        vprod01234567 = _mm_mullo_epi16(vxi7x01234567, vxk7x01234567);
        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
        const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
        const __m128i vxi8x01234567 = _mm_cvtepi8_epi16(vi8x01234567);
        const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) (k + 128));
        const __m128i vxk8x01234567 = _mm_cvtepi8_epi16(vk8x01234567);
        i8 += 8;
        vprod01234567 = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);
        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
        k += 8;
        // Same FP32 requantization as the main loop, for one 8-lane group.
        __m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
        __m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);
        const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
        vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
        vscaled4567 = _mm_mul_ps(vscaled4567, vscale);
        const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
        vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
        vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);
        vacc0123 = _mm_cvtps_epi32(vscaled0123);
        vacc4567 = _mm_cvtps_epi32(vscaled4567);
        // Advance 'w' past only the 8 biases consumed; kernel bytes are
        // addressed through 'k'.
        w = (const void*) ((const int32_t*) w + 8);
        const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
        __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
        __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
        vout0123456701234567 = _mm_max_epi8(vout0123456701234567, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
        if XNN_LIKELY(c >= 8) {
          _mm_storel_epi64((__m128i*) output, vout0123456701234567);
          output += 8;
          c -= 8;
        } else {
          // Final partial group: store the low c (< 8) bytes via 4/2/1-byte
          // pieces, shifting consumed lanes out of the vector as we go.
          if (c & 4) {
            unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
            vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
            output += 4;
          }
          if (c & 2) {
            unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
            vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
            output += 2;
          }
          if (c & 1) {
            *output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
            output += 1;
          }
          c = 0;
        }
      } while (c != 0);
    }
    // Move to the next output pixel position.
    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 23,241
| 48.875536
| 131
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-9p16c-minmax-fp32-neon-mul16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-neon-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
void xnn_qs8_dwconv_minmax_fp32_ukernel_9p16c__neon_mul16(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const float32x4_t vscale = vld1q_dup_f32(¶ms->fp32_neon.scale);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->fp32_neon.magic_bias);
const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(¶ms->fp32_neon.magic_bias_less_output_zero_point);
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->fp32_neon.output_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->fp32_neon.output_max);
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
const int8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
size_t c = channels;
const void* w = weights;
for (; c >= 16; c -= 16) {
int32x4_t vacc0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc89AB = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vaccCDEF = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vi0x89ABCDEF = vmovl_s8(vld1_s8(i0)); i0 += 8;
const int16x8_t vk0x89ABCDEF = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi0x89ABCDEF), vget_low_s16(vk0x89ABCDEF));
vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi0x89ABCDEF), vget_high_s16(vk0x89ABCDEF));
const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vi1x89ABCDEF = vmovl_s8(vld1_s8(i1)); i1 += 8;
const int16x8_t vk1x89ABCDEF = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi1x89ABCDEF), vget_low_s16(vk1x89ABCDEF));
vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi1x89ABCDEF), vget_high_s16(vk1x89ABCDEF));
const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vi2x89ABCDEF = vmovl_s8(vld1_s8(i2)); i2 += 8;
const int16x8_t vk2x89ABCDEF = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi2x89ABCDEF), vget_low_s16(vk2x89ABCDEF));
vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi2x89ABCDEF), vget_high_s16(vk2x89ABCDEF));
const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vi3x89ABCDEF = vmovl_s8(vld1_s8(i3)); i3 += 8;
const int16x8_t vk3x89ABCDEF = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi3x89ABCDEF), vget_low_s16(vk3x89ABCDEF));
vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi3x89ABCDEF), vget_high_s16(vk3x89ABCDEF));
const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vi4x89ABCDEF = vmovl_s8(vld1_s8(i4)); i4 += 8;
const int16x8_t vk4x89ABCDEF = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi4x89ABCDEF), vget_low_s16(vk4x89ABCDEF));
vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi4x89ABCDEF), vget_high_s16(vk4x89ABCDEF));
const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5)); i5 += 8;
const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vi5x89ABCDEF = vmovl_s8(vld1_s8(i5)); i5 += 8;
const int16x8_t vk5x89ABCDEF = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi5x89ABCDEF), vget_low_s16(vk5x89ABCDEF));
vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi5x89ABCDEF), vget_high_s16(vk5x89ABCDEF));
const int16x8_t vi6x01234567 = vmovl_s8(vld1_s8(i6)); i6 += 8;
const int16x8_t vk6x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vi6x89ABCDEF = vmovl_s8(vld1_s8(i6)); i6 += 8;
const int16x8_t vk6x89ABCDEF = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi6x89ABCDEF), vget_low_s16(vk6x89ABCDEF));
vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi6x89ABCDEF), vget_high_s16(vk6x89ABCDEF));
const int16x8_t vi7x01234567 = vmovl_s8(vld1_s8(i7)); i7 += 8;
const int16x8_t vk7x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vi7x89ABCDEF = vmovl_s8(vld1_s8(i7)); i7 += 8;
const int16x8_t vk7x89ABCDEF = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));
vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi7x89ABCDEF), vget_low_s16(vk7x89ABCDEF));
vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi7x89ABCDEF), vget_high_s16(vk7x89ABCDEF));
const int16x8_t vi8x01234567 = vmovl_s8(vld1_s8(i8)); i8 += 8;
const int16x8_t vk8x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vi8x89ABCDEF = vmovl_s8(vld1_s8(i8)); i8 += 8;
const int16x8_t vk8x89ABCDEF = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi8x01234567), vget_low_s16(vk8x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi8x01234567), vget_high_s16(vk8x01234567));
vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi8x89ABCDEF), vget_low_s16(vk8x89ABCDEF));
vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi8x89ABCDEF), vget_high_s16(vk8x89ABCDEF));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
float32x4_t vfpacc89AB = vcvtq_f32_s32(vacc89AB);
float32x4_t vfpaccCDEF = vcvtq_f32_s32(vaccCDEF);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vfpacc89AB = vmulq_f32(vfpacc89AB, vscale);
vfpaccCDEF = vmulq_f32(vfpaccCDEF, vscale);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc89AB = vreinterpretq_s32_f32(vaddq_f32(vfpacc89AB, vmagic_bias));
vaccCDEF = vreinterpretq_s32_f32(vaddq_f32(vfpaccCDEF, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
vacc89AB = vqsubq_s32(vacc89AB, vmagic_bias_less_output_zero_point);
vaccCDEF = vqsubq_s32(vaccCDEF, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
int8x16_t vout0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc01234567), vacc89ABCDEF);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
#endif // !XNN_ARCH_ARM64
vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
vst1q_s8(output, vout0123456789ABCDEF); output += 16;
}
if XNN_UNLIKELY(c != 0) {
const int8_t* k = (const int8_t*) ((const int32_t*) w + 16);
do {
int32x4_t vacc0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(k)); k += 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8((const void*) (k + 8)));
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8((const void*) (k + 24)));
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8((const void*) (k + 40)));
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8((const void*) (k + 56)));
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5)); i5 += 8;
const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8((const void*) (k + 72)));
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
const int16x8_t vi6x01234567 = vmovl_s8(vld1_s8(i6)); i6 += 8;
const int16x8_t vk6x01234567 = vmovl_s8(vld1_s8((const void*) (k + 88)));
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
const int16x8_t vi7x01234567 = vmovl_s8(vld1_s8(i7)); i7 += 8;
const int16x8_t vk7x01234567 = vmovl_s8(vld1_s8((const void*) (k + 104)));
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));
const int16x8_t vi8x01234567 = vmovl_s8(vld1_s8(i8)); i8 += 8;
const int16x8_t vk8x01234567 = vmovl_s8(vld1_s8((const void*) (k + 120)));
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi8x01234567), vget_low_s16(vk8x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi8x01234567), vget_high_s16(vk8x01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));
if XNN_LIKELY(c >= 8) {
vst1_s8(output, vout01234567); output += 8;
c -= 8;
} else {
if (c & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (c & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (c & 1) {
vst1_lane_s8(output, vout01234567, 0); output += 1;
}
c = 0;
}
} while (c != 0);
}
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 16,675
| 50.950156
| 123
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-9p16c-minmax-fp32-neonv8-mul16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-neon-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/intrinsics-polyfill.h>
// Depthwise-convolution microkernel for signed 8-bit (QS8) data:
// unipass, 9 taps per output pixel (9p, e.g. a 3x3 kernel), 16 channels per
// main-loop iteration (16c). Accumulation is int32 built from widening
// 16-bit multiply-accumulates (mul16); requantization multiplies by an fp32
// scale and rounds with the ARMv8 round-to-nearest-even convert (vcvtnq),
// hence the "fp32-neonv8" name.
//
// channels         - number of channels per pixel; must be non-zero.
// output_width     - number of output pixels to compute; must be non-zero.
// input            - per-pixel array of 9 row pointers; rows equal to `zero`
//                    are padding and are NOT offset by input_offset.
// weights          - packed as [16 x int32 bias][9 x 16 x int8 kernel taps]
//                    per 16-channel group (see the `w` pointer walk below).
// output           - destination for the quantized int8 results.
// input_stride     - byte stride from one pixel's pointer group to the next.
// output_increment - bytes added to `output` after each pixel's channels.
// input_offset     - byte offset applied to every non-padding input pointer.
// zero             - sentinel pointer marking zero-padding rows.
// params           - requantization constants (scale, output zero point,
//                    clamping min/max) in the fp32_neonv8 layout.
//
// XNN_OOB_READS: the remainder path loads full 8-byte vectors even when
// fewer than 8 channels remain, so reads may run past the buffer end.
void xnn_qs8_dwconv_minmax_fp32_ukernel_9p16c__neonv8_mul16(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);

  // Broadcast requantization constants once; they are loop-invariant.
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
  const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neonv8.output_min);
  const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neonv8.output_max);
  do {
    // Fetch the 9 row pointers for this output pixel. Rows equal to `zero`
    // are padding and deliberately skip the input_offset adjustment.
    const int8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
    }
    const int8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
    }
    const int8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
    }
    const int8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
    }
    const int8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
    }
    const int8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
    }
    const int8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
    }
    const int8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
    }
    const int8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
    }
    input = (const int8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
    // Main loop: 16 channels at a time. `w` walks the packed weights in
    // order: 16 int32 biases, then 9 rows of 16 int8 kernel taps.
    for (; c >= 16; c -= 16) {
      // Seed the four int32x4 accumulators with the per-channel biases.
      int32x4_t vacc0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
      int32x4_t vacc4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
      int32x4_t vacc89AB = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
      int32x4_t vaccCDEF = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);

      // Taps 0..8: sign-extend 8 input and 8 kernel bytes to int16, then
      // widening multiply-accumulate (s16 x s16 -> s32) into the accumulators.
      const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
      const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
      const int16x8_t vi0x89ABCDEF = vmovl_s8(vld1_s8(i0)); i0 += 8;
      const int16x8_t vk0x89ABCDEF = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
      vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi0x89ABCDEF), vget_low_s16(vk0x89ABCDEF));
      vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi0x89ABCDEF), vget_high_s16(vk0x89ABCDEF));
      const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
      const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
      const int16x8_t vi1x89ABCDEF = vmovl_s8(vld1_s8(i1)); i1 += 8;
      const int16x8_t vk1x89ABCDEF = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
      vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi1x89ABCDEF), vget_low_s16(vk1x89ABCDEF));
      vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi1x89ABCDEF), vget_high_s16(vk1x89ABCDEF));
      const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
      const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
      const int16x8_t vi2x89ABCDEF = vmovl_s8(vld1_s8(i2)); i2 += 8;
      const int16x8_t vk2x89ABCDEF = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
      vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi2x89ABCDEF), vget_low_s16(vk2x89ABCDEF));
      vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi2x89ABCDEF), vget_high_s16(vk2x89ABCDEF));
      const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
      const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
      const int16x8_t vi3x89ABCDEF = vmovl_s8(vld1_s8(i3)); i3 += 8;
      const int16x8_t vk3x89ABCDEF = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
      vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi3x89ABCDEF), vget_low_s16(vk3x89ABCDEF));
      vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi3x89ABCDEF), vget_high_s16(vk3x89ABCDEF));
      const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
      const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
      const int16x8_t vi4x89ABCDEF = vmovl_s8(vld1_s8(i4)); i4 += 8;
      const int16x8_t vk4x89ABCDEF = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
      vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi4x89ABCDEF), vget_low_s16(vk4x89ABCDEF));
      vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi4x89ABCDEF), vget_high_s16(vk4x89ABCDEF));
      const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5)); i5 += 8;
      const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
      const int16x8_t vi5x89ABCDEF = vmovl_s8(vld1_s8(i5)); i5 += 8;
      const int16x8_t vk5x89ABCDEF = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
      vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi5x89ABCDEF), vget_low_s16(vk5x89ABCDEF));
      vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi5x89ABCDEF), vget_high_s16(vk5x89ABCDEF));
      const int16x8_t vi6x01234567 = vmovl_s8(vld1_s8(i6)); i6 += 8;
      const int16x8_t vk6x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
      const int16x8_t vi6x89ABCDEF = vmovl_s8(vld1_s8(i6)); i6 += 8;
      const int16x8_t vk6x89ABCDEF = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
      vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi6x89ABCDEF), vget_low_s16(vk6x89ABCDEF));
      vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi6x89ABCDEF), vget_high_s16(vk6x89ABCDEF));
      const int16x8_t vi7x01234567 = vmovl_s8(vld1_s8(i7)); i7 += 8;
      const int16x8_t vk7x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
      const int16x8_t vi7x89ABCDEF = vmovl_s8(vld1_s8(i7)); i7 += 8;
      const int16x8_t vk7x89ABCDEF = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));
      vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi7x89ABCDEF), vget_low_s16(vk7x89ABCDEF));
      vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi7x89ABCDEF), vget_high_s16(vk7x89ABCDEF));
      const int16x8_t vi8x01234567 = vmovl_s8(vld1_s8(i8)); i8 += 8;
      const int16x8_t vk8x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
      const int16x8_t vi8x89ABCDEF = vmovl_s8(vld1_s8(i8)); i8 += 8;
      const int16x8_t vk8x89ABCDEF = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi8x01234567), vget_low_s16(vk8x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi8x01234567), vget_high_s16(vk8x01234567));
      vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi8x89ABCDEF), vget_low_s16(vk8x89ABCDEF));
      vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi8x89ABCDEF), vget_high_s16(vk8x89ABCDEF));

      // Requantize: int32 -> fp32, scale, then round-to-nearest-even back to
      // int32 with the ARMv8 vcvtnq instruction.
      float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
      float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
      float32x4_t vfpacc89AB = vcvtq_f32_s32(vacc89AB);
      float32x4_t vfpaccCDEF = vcvtq_f32_s32(vaccCDEF);

      vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
      vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
      vfpacc89AB = vmulq_f32(vfpacc89AB, vscale);
      vfpaccCDEF = vmulq_f32(vfpaccCDEF, vscale);

      vacc0123 = vcvtnq_s32_f32(vfpacc0123);
      vacc4567 = vcvtnq_s32_f32(vfpacc4567);
      vacc89AB = vcvtnq_s32_f32(vfpacc89AB);
      vaccCDEF = vcvtnq_s32_f32(vfpaccCDEF);

      // Saturating narrow to int16, add the output zero point, then
      // saturating narrow to int8. AArch64 has fused high-half narrows.
#if XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
      int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);

      vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
      vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);

      int8x16_t vout0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc01234567), vacc89ABCDEF);
#else  // !XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
      int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));

      vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
      vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);

      int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
#endif  // !XNN_ARCH_ARM64

      // Clamp to the output activation range and store 16 channels.
      vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min)

      vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);

      vst1q_s8(output, vout0123456789ABCDEF); output += 16;
    }
    if XNN_UNLIKELY(c != 0) {
      // Remainder: up to 15 channels, processed 8 at a time. `k` skips the
      // 16 int32 biases; taps 1..8 are reached via fixed offsets of 16 bytes
      // per kernel row (8, 24, ..., 120) relative to the advanced `k`.
      const int8_t* k = (const int8_t*) ((const int32_t*) w + 16);
      do {
        int32x4_t vacc0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
        int32x4_t vacc4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);

        const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
        const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(k)); k += 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
        const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
        const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8((const void*) (k + 8)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
        const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
        const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8((const void*) (k + 24)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
        const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
        const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8((const void*) (k + 40)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
        const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
        const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8((const void*) (k + 56)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
        const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5)); i5 += 8;
        const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8((const void*) (k + 72)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
        const int16x8_t vi6x01234567 = vmovl_s8(vld1_s8(i6)); i6 += 8;
        const int16x8_t vk6x01234567 = vmovl_s8(vld1_s8((const void*) (k + 88)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
        const int16x8_t vi7x01234567 = vmovl_s8(vld1_s8(i7)); i7 += 8;
        const int16x8_t vk7x01234567 = vmovl_s8(vld1_s8((const void*) (k + 104)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));
        const int16x8_t vi8x01234567 = vmovl_s8(vld1_s8(i8)); i8 += 8;
        const int16x8_t vk8x01234567 = vmovl_s8(vld1_s8((const void*) (k + 120)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi8x01234567), vget_low_s16(vk8x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi8x01234567), vget_high_s16(vk8x01234567));

        // Same fp32 requantization as the main loop, on 8 channels.
        float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
        float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);

        vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
        vfpacc4567 = vmulq_f32(vfpacc4567, vscale);

        vacc0123 = vcvtnq_s32_f32(vfpacc0123);
        vacc4567 = vcvtnq_s32_f32(vfpacc4567);

#if XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
        int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
        vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);

        int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
        vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
        vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));

        if XNN_LIKELY(c >= 8) {
          vst1_s8(output, vout01234567); output += 8;
          c -= 8;
        } else {
          // Store the final 1..7 channels with 4/2/1-byte lane stores,
          // rotating the vector so the next store reads from lane 0.
          if (c & 4) {
            vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
            vout01234567 = vext_s8(vout01234567, vout01234567, 4);
          }
          if (c & 2) {
            vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
            vout01234567 = vext_s8(vout01234567, vout01234567, 2);
          }
          if (c & 1) {
            vst1_lane_s8(output, vout01234567, 0); output += 1;
          }
          c = 0;
        }
      } while (c != 0);
    }

    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 16,309
| 50.289308
| 103
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-9p16c-minmax-fp32-sse2-mul16-add16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-sse-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_dwconv_minmax_fp32_ukernel_9p16c__sse2_mul16_add16(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
const int8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
size_t c = channels;
const void* w = weights;
for (; c >= 16; c -= 16) {
__m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));
__m128i vacc89AB = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 8));
__m128i vaccCDEF = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 12));
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 0 * sizeof(int8_t)));
const __m128i vi0x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i0 + 8));
const __m128i vk0x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 8 * sizeof(int8_t)));
i0 += 16;
const __m128i vxi0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi0x01234567, vi0x01234567), 8);
const __m128i vxk0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk0x01234567, vk0x01234567), 8);
const __m128i vxi0x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi0x89ABCDEF, vi0x89ABCDEF), 8);
const __m128i vxk0x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vk0x89ABCDEF, vk0x89ABCDEF), 8);
__m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
__m128i vprod89ABCDEF = _mm_mullo_epi16(vxi0x89ABCDEF, vxk0x89ABCDEF);
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 16 * sizeof(int8_t)));
const __m128i vi1x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i1 + 8));
const __m128i vk1x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 24 * sizeof(int8_t)));
i1 += 16;
const __m128i vxi1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi1x01234567, vi1x01234567), 8);
const __m128i vxk1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk1x01234567, vk1x01234567), 8);
const __m128i vxi1x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi1x89ABCDEF, vi1x89ABCDEF), 8);
const __m128i vxk1x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vk1x89ABCDEF, vk1x89ABCDEF), 8);
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi1x01234567, vxk1x01234567));
vprod89ABCDEF = _mm_add_epi16(vprod89ABCDEF, _mm_mullo_epi16(vxi1x89ABCDEF, vxk1x89ABCDEF));
const __m128i vsignprod1x01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vprod01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod01234567, vsignprod1x01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod01234567, vsignprod1x01234567));
const __m128i vsignprod1x89ABCDEF = _mm_cmpgt_epi16(_mm_setzero_si128(), vprod89ABCDEF);
vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vprod89ABCDEF, vsignprod1x89ABCDEF));
vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vprod89ABCDEF, vsignprod1x89ABCDEF));
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 32 * sizeof(int8_t)));
const __m128i vi2x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i2 + 8));
const __m128i vk2x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 40 * sizeof(int8_t)));
i2 += 16;
const __m128i vxi2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi2x01234567, vi2x01234567), 8);
const __m128i vxk2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk2x01234567, vk2x01234567), 8);
const __m128i vxi2x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi2x89ABCDEF, vi2x89ABCDEF), 8);
const __m128i vxk2x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vk2x89ABCDEF, vk2x89ABCDEF), 8);
vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
vprod89ABCDEF = _mm_mullo_epi16(vxi2x89ABCDEF, vxk2x89ABCDEF);
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 48 * sizeof(int8_t)));
const __m128i vi3x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i3 + 8));
const __m128i vk3x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 56 * sizeof(int8_t)));
i3 += 16;
const __m128i vxi3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi3x01234567, vi3x01234567), 8);
const __m128i vxk3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk3x01234567, vk3x01234567), 8);
const __m128i vxi3x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi3x89ABCDEF, vi3x89ABCDEF), 8);
const __m128i vxk3x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vk3x89ABCDEF, vk3x89ABCDEF), 8);
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi3x01234567, vxk3x01234567));
vprod89ABCDEF = _mm_add_epi16(vprod89ABCDEF, _mm_mullo_epi16(vxi3x89ABCDEF, vxk3x89ABCDEF));
const __m128i vsignprod3x01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vprod01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod01234567, vsignprod3x01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod01234567, vsignprod3x01234567));
const __m128i vsignprod3x89ABCDEF = _mm_cmpgt_epi16(_mm_setzero_si128(), vprod89ABCDEF);
vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vprod89ABCDEF, vsignprod3x89ABCDEF));
vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vprod89ABCDEF, vsignprod3x89ABCDEF));
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 64 * sizeof(int8_t)));
const __m128i vi4x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i4 + 8));
const __m128i vk4x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 72 * sizeof(int8_t)));
i4 += 16;
const __m128i vxi4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi4x01234567, vi4x01234567), 8);
const __m128i vxk4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk4x01234567, vk4x01234567), 8);
const __m128i vxi4x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi4x89ABCDEF, vi4x89ABCDEF), 8);
const __m128i vxk4x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vk4x89ABCDEF, vk4x89ABCDEF), 8);
vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
vprod89ABCDEF = _mm_mullo_epi16(vxi4x89ABCDEF, vxk4x89ABCDEF);
const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 80 * sizeof(int8_t)));
const __m128i vi5x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i5 + 8));
const __m128i vk5x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 88 * sizeof(int8_t)));
i5 += 16;
const __m128i vxi5x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi5x01234567, vi5x01234567), 8);
const __m128i vxk5x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk5x01234567, vk5x01234567), 8);
const __m128i vxi5x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi5x89ABCDEF, vi5x89ABCDEF), 8);
const __m128i vxk5x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vk5x89ABCDEF, vk5x89ABCDEF), 8);
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi5x01234567, vxk5x01234567));
vprod89ABCDEF = _mm_add_epi16(vprod89ABCDEF, _mm_mullo_epi16(vxi5x89ABCDEF, vxk5x89ABCDEF));
const __m128i vsignprod5x01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vprod01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod01234567, vsignprod5x01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod01234567, vsignprod5x01234567));
const __m128i vsignprod5x89ABCDEF = _mm_cmpgt_epi16(_mm_setzero_si128(), vprod89ABCDEF);
vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vprod89ABCDEF, vsignprod5x89ABCDEF));
vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vprod89ABCDEF, vsignprod5x89ABCDEF));
const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 96 * sizeof(int8_t)));
const __m128i vi6x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i6 + 8));
const __m128i vk6x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 104 * sizeof(int8_t)));
i6 += 16;
const __m128i vxi6x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi6x01234567, vi6x01234567), 8);
const __m128i vxk6x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk6x01234567, vk6x01234567), 8);
const __m128i vxi6x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi6x89ABCDEF, vi6x89ABCDEF), 8);
const __m128i vxk6x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vk6x89ABCDEF, vk6x89ABCDEF), 8);
vprod01234567 = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);
vprod89ABCDEF = _mm_mullo_epi16(vxi6x89ABCDEF, vxk6x89ABCDEF);
const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 112 * sizeof(int8_t)));
const __m128i vi7x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i7 + 8));
const __m128i vk7x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 120 * sizeof(int8_t)));
i7 += 16;
const __m128i vxi7x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi7x01234567, vi7x01234567), 8);
const __m128i vxk7x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk7x01234567, vk7x01234567), 8);
const __m128i vxi7x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi7x89ABCDEF, vi7x89ABCDEF), 8);
const __m128i vxk7x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vk7x89ABCDEF, vk7x89ABCDEF), 8);
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi7x01234567, vxk7x01234567));
vprod89ABCDEF = _mm_add_epi16(vprod89ABCDEF, _mm_mullo_epi16(vxi7x89ABCDEF, vxk7x89ABCDEF));
const __m128i vsignprod7x01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vprod01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod01234567, vsignprod7x01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod01234567, vsignprod7x01234567));
const __m128i vsignprod7x89ABCDEF = _mm_cmpgt_epi16(_mm_setzero_si128(), vprod89ABCDEF);
vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vprod89ABCDEF, vsignprod7x89ABCDEF));
vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vprod89ABCDEF, vsignprod7x89ABCDEF));
const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 128 * sizeof(int8_t)));
const __m128i vi8x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i8 + 8));
const __m128i vk8x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 136 * sizeof(int8_t)));
i8 += 16;
const __m128i vxi8x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi8x01234567, vi8x01234567), 8);
const __m128i vxk8x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk8x01234567, vk8x01234567), 8);
const __m128i vxi8x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi8x89ABCDEF, vi8x89ABCDEF), 8);
const __m128i vxk8x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vk8x89ABCDEF, vk8x89ABCDEF), 8);
vprod01234567 = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);
vprod89ABCDEF = _mm_mullo_epi16(vxi8x89ABCDEF, vxk8x89ABCDEF);
const __m128i vsignprod8x01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vprod01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod01234567, vsignprod8x01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod01234567, vsignprod8x01234567));
const __m128i vsignprod8x89ABCDEF = _mm_cmpgt_epi16(_mm_setzero_si128(), vprod89ABCDEF);
vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vprod89ABCDEF, vsignprod8x89ABCDEF));
vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vprod89ABCDEF, vsignprod8x89ABCDEF));
w = (const void*) ((uintptr_t) w + 16 * sizeof(int32_t) + 144 * sizeof(int8_t));
__m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);
__m128 vscaled89AB = _mm_cvtepi32_ps(vacc89AB);
__m128 vscaledCDEF = _mm_cvtepi32_ps(vaccCDEF);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
vscaled4567 = _mm_mul_ps(vscaled4567, vscale);
vscaled89AB = _mm_mul_ps(vscaled89AB, vscale);
vscaledCDEF = _mm_mul_ps(vscaledCDEF, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);
vscaled89AB = _mm_min_ps(vscaled89AB, voutput_max_less_zero_point);
vscaledCDEF = _mm_min_ps(vscaledCDEF, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vscaled0123);
vacc4567 = _mm_cvtps_epi32(vscaled4567);
vacc89AB = _mm_cvtps_epi32(vscaled89AB);
vaccCDEF = _mm_cvtps_epi32(vscaledCDEF);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
vout01234567 = _mm_max_epi16(vout01234567, voutput_min);
vout89ABCDEF = _mm_max_epi16(vout89ABCDEF, voutput_min);
__m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(c != 0) {
const int8_t* k = (const int8_t*) ((const int32_t*) w + 16);
do {
__m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) k);
i0 += 8;
const __m128i vxi0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi0x01234567, vi0x01234567), 8);
const __m128i vxk0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk0x01234567, vk0x01234567), 8);
__m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) (k + 16));
i1 += 8;
const __m128i vxi1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi1x01234567, vi1x01234567), 8);
const __m128i vxk1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk1x01234567, vk1x01234567), 8);
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi1x01234567, vxk1x01234567));
const __m128i vsignprod1x01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vprod01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod01234567, vsignprod1x01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod01234567, vsignprod1x01234567));
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) (k + 32));
i2 += 8;
const __m128i vxi2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi2x01234567, vi2x01234567), 8);
const __m128i vxk2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk2x01234567, vk2x01234567), 8);
vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) (k + 48));
i3 += 8;
const __m128i vxi3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi3x01234567, vi3x01234567), 8);
const __m128i vxk3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk3x01234567, vk3x01234567), 8);
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi3x01234567, vxk3x01234567));
const __m128i vsignprod3x01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vprod01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod01234567, vsignprod3x01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod01234567, vsignprod3x01234567));
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) (k + 64));
i4 += 8;
const __m128i vxi4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi4x01234567, vi4x01234567), 8);
const __m128i vxk4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk4x01234567, vk4x01234567), 8);
vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) (k + 80));
i5 += 8;
const __m128i vxi5x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi5x01234567, vi5x01234567), 8);
const __m128i vxk5x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk5x01234567, vk5x01234567), 8);
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi5x01234567, vxk5x01234567));
const __m128i vsignprod5x01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vprod01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod01234567, vsignprod5x01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod01234567, vsignprod5x01234567));
const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) (k + 96));
i6 += 8;
const __m128i vxi6x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi6x01234567, vi6x01234567), 8);
const __m128i vxk6x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk6x01234567, vk6x01234567), 8);
vprod01234567 = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);
const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) (k + 112));
i7 += 8;
const __m128i vxi7x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi7x01234567, vi7x01234567), 8);
const __m128i vxk7x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk7x01234567, vk7x01234567), 8);
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi7x01234567, vxk7x01234567));
const __m128i vsignprod7x01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vprod01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod01234567, vsignprod7x01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod01234567, vsignprod7x01234567));
const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) (k + 128));
i8 += 8;
const __m128i vxi8x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi8x01234567, vi8x01234567), 8);
const __m128i vxk8x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk8x01234567, vk8x01234567), 8);
vprod01234567 = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);
const __m128i vsignprod8x01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vprod01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod01234567, vsignprod8x01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod01234567, vsignprod8x01234567));
k += 8;
__m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
vscaled4567 = _mm_mul_ps(vscaled4567, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vscaled0123);
vacc4567 = _mm_cvtps_epi32(vscaled4567);
w = (const void*) ((const int32_t*) w + 8);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
vout01234567 = _mm_max_epi16(vout01234567, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
if XNN_LIKELY(c >= 8) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
c -= 8;
} else {
if (c & 4) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (c & 2) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (c & 1) {
*output = (int8_t) _mm_cvtsi128_si32(vout0123456701234567);
output += 1;
}
c = 0;
}
} while (c != 0);
}
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 24,677
| 52.882096
| 131
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-9p16c-minmax-fp32-sse41-mul16-add16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-sse-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/unaligned.h>
// QS8 depthwise-convolution microkernel: 9-tap unipass ("9p"), 16 channels per
// main-loop iteration ("16c"), fp32 requantization, SSE4.1 using 16-bit
// multiplies with pairwise 16-bit accumulation before widening ("mul16_add16").
//
// channels         number of channels; processed 16 at a time, then 8 at a time,
//                  with a sub-8 scalar-ish tail via partial vector stores
// output_width     number of output pixels to produce
// input            per-output-pixel array of 9 input-row pointers; advanced by
//                  input_stride bytes after each pixel
// weights          packed blocks of [16 x int32 bias][9 taps x 16 x int8 kernel]
// output           int8 output; output_increment is added after each pixel
// input_offset     byte offset applied to every non-`zero` input pointer
// zero             pointer to a zero vector used for padding rows (left as-is)
// params           fp32_sse4 requantization constants (scale, output clamp
//                  bounds, output zero point), each replicated for vector loads
void xnn_qs8_dwconv_minmax_fp32_ukernel_9p16c__sse41_mul16_add16(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);
  do {
    // Set up the 9 tap input pointers. A pointer equal to `zero` marks a
    // padding row and must NOT be offset, so it keeps pointing at zeros.
    const int8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
    }
    const int8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
    }
    const int8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
    }
    const int8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
    }
    const int8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
    }
    const int8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
    }
    const int8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
    }
    const int8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
    }
    const int8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
    }
    input = (const int8_t**) ((uintptr_t) input + input_stride);
    size_t c = channels;
    const void* w = weights;
    // Main loop: 16 channels per iteration, as two lanes of 8 (01234567 and
    // 89ABCDEF). Each tap multiplies sign-extended int8 inputs and kernel
    // weights in int16; two consecutive taps are summed in int16 before being
    // widened into the int32 accumulators. The pair-sum cannot overflow int16:
    // |int8 * int8| <= 128*127 = 16256, and 2 * 16256 = 32512 < 32767.
    for (; c >= 16; c -= 16) {
      // Start from the per-channel int32 bias at the head of the weight block.
      __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
      __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));
      __m128i vacc89AB = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 8));
      __m128i vaccCDEF = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 12));

      // Tap 0: kernel bytes at offset 0/8 past the 16 int32 biases.
      const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
      const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
      const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 0 * sizeof(int8_t)));
      const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
      const __m128i vi0x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i0 + 8));
      const __m128i vxi0x89ABCDEF = _mm_cvtepi8_epi16(vi0x89ABCDEF);
      const __m128i vk0x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 8 * sizeof(int8_t)));
      const __m128i vxk0x89ABCDEF = _mm_cvtepi8_epi16(vk0x89ABCDEF);
      i0 += 16;

      __m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
      __m128i vprod89ABCDEF = _mm_mullo_epi16(vxi0x89ABCDEF, vxk0x89ABCDEF);

      // Tap 1: added to tap 0 in int16, then the pair is widened into int32
      // (low half via sign extension, high half via unpack + arithmetic shift).
      const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
      const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
      const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 16 * sizeof(int8_t)));
      const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
      const __m128i vi1x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i1 + 8));
      const __m128i vxi1x89ABCDEF = _mm_cvtepi8_epi16(vi1x89ABCDEF);
      const __m128i vk1x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 24 * sizeof(int8_t)));
      const __m128i vxk1x89ABCDEF = _mm_cvtepi8_epi16(vk1x89ABCDEF);
      i1 += 16;

      vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi1x01234567, vxk1x01234567));
      vprod89ABCDEF = _mm_add_epi16(vprod89ABCDEF, _mm_mullo_epi16(vxi1x89ABCDEF, vxk1x89ABCDEF));

      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
      vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
      vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));

      // Tap 2 (starts a new int16 pair).
      const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
      const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
      const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 32 * sizeof(int8_t)));
      const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
      const __m128i vi2x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i2 + 8));
      const __m128i vxi2x89ABCDEF = _mm_cvtepi8_epi16(vi2x89ABCDEF);
      const __m128i vk2x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 40 * sizeof(int8_t)));
      const __m128i vxk2x89ABCDEF = _mm_cvtepi8_epi16(vk2x89ABCDEF);
      i2 += 16;

      vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
      vprod89ABCDEF = _mm_mullo_epi16(vxi2x89ABCDEF, vxk2x89ABCDEF);

      // Tap 3: completes the pair with tap 2, then widen-accumulate.
      const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
      const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
      const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 48 * sizeof(int8_t)));
      const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
      const __m128i vi3x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i3 + 8));
      const __m128i vxi3x89ABCDEF = _mm_cvtepi8_epi16(vi3x89ABCDEF);
      const __m128i vk3x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 56 * sizeof(int8_t)));
      const __m128i vxk3x89ABCDEF = _mm_cvtepi8_epi16(vk3x89ABCDEF);
      i3 += 16;

      vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi3x01234567, vxk3x01234567));
      vprod89ABCDEF = _mm_add_epi16(vprod89ABCDEF, _mm_mullo_epi16(vxi3x89ABCDEF, vxk3x89ABCDEF));

      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
      vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
      vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));

      // Tap 4 (new pair).
      const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
      const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
      const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 64 * sizeof(int8_t)));
      const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
      const __m128i vi4x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i4 + 8));
      const __m128i vxi4x89ABCDEF = _mm_cvtepi8_epi16(vi4x89ABCDEF);
      const __m128i vk4x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 72 * sizeof(int8_t)));
      const __m128i vxk4x89ABCDEF = _mm_cvtepi8_epi16(vk4x89ABCDEF);
      i4 += 16;

      vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
      vprod89ABCDEF = _mm_mullo_epi16(vxi4x89ABCDEF, vxk4x89ABCDEF);

      // Tap 5: completes the pair with tap 4, then widen-accumulate.
      const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
      const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(vi5x01234567);
      const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 80 * sizeof(int8_t)));
      const __m128i vxk5x01234567 = _mm_cvtepi8_epi16(vk5x01234567);
      const __m128i vi5x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i5 + 8));
      const __m128i vxi5x89ABCDEF = _mm_cvtepi8_epi16(vi5x89ABCDEF);
      const __m128i vk5x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 88 * sizeof(int8_t)));
      const __m128i vxk5x89ABCDEF = _mm_cvtepi8_epi16(vk5x89ABCDEF);
      i5 += 16;

      vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi5x01234567, vxk5x01234567));
      vprod89ABCDEF = _mm_add_epi16(vprod89ABCDEF, _mm_mullo_epi16(vxi5x89ABCDEF, vxk5x89ABCDEF));

      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
      vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
      vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));

      // Tap 6 (new pair).
      const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
      const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(vi6x01234567);
      const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 96 * sizeof(int8_t)));
      const __m128i vxk6x01234567 = _mm_cvtepi8_epi16(vk6x01234567);
      const __m128i vi6x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i6 + 8));
      const __m128i vxi6x89ABCDEF = _mm_cvtepi8_epi16(vi6x89ABCDEF);
      const __m128i vk6x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 104 * sizeof(int8_t)));
      const __m128i vxk6x89ABCDEF = _mm_cvtepi8_epi16(vk6x89ABCDEF);
      i6 += 16;

      vprod01234567 = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);
      vprod89ABCDEF = _mm_mullo_epi16(vxi6x89ABCDEF, vxk6x89ABCDEF);

      // Tap 7: completes the pair with tap 6, then widen-accumulate.
      const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
      const __m128i vxi7x01234567 = _mm_cvtepi8_epi16(vi7x01234567);
      const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 112 * sizeof(int8_t)));
      const __m128i vxk7x01234567 = _mm_cvtepi8_epi16(vk7x01234567);
      const __m128i vi7x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i7 + 8));
      const __m128i vxi7x89ABCDEF = _mm_cvtepi8_epi16(vi7x89ABCDEF);
      const __m128i vk7x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 120 * sizeof(int8_t)));
      const __m128i vxk7x89ABCDEF = _mm_cvtepi8_epi16(vk7x89ABCDEF);
      i7 += 16;

      vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi7x01234567, vxk7x01234567));
      vprod89ABCDEF = _mm_add_epi16(vprod89ABCDEF, _mm_mullo_epi16(vxi7x89ABCDEF, vxk7x89ABCDEF));

      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
      vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
      vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));

      // Tap 8: the odd tap out (9 taps), widened alone.
      const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
      const __m128i vxi8x01234567 = _mm_cvtepi8_epi16(vi8x01234567);
      const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 128 * sizeof(int8_t)));
      const __m128i vxk8x01234567 = _mm_cvtepi8_epi16(vk8x01234567);
      const __m128i vi8x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i8 + 8));
      const __m128i vxi8x89ABCDEF = _mm_cvtepi8_epi16(vi8x89ABCDEF);
      const __m128i vk8x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 136 * sizeof(int8_t)));
      const __m128i vxk8x89ABCDEF = _mm_cvtepi8_epi16(vk8x89ABCDEF);
      i8 += 16;

      vprod01234567 = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);
      vprod89ABCDEF = _mm_mullo_epi16(vxi8x89ABCDEF, vxk8x89ABCDEF);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
      vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
      vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));

      // Advance past this channel group's weight block: 16 biases + 9*16 kernel bytes.
      w = (const void*) ((uintptr_t) w + 16 * sizeof(int32_t) + 144 * sizeof(int8_t));

      // fp32 requantization: int32 -> float, scale, clamp the upper bound in
      // float space, then round back to int32 (_mm_cvtps_epi32 rounds to
      // nearest-even under the default MXCSR rounding mode).
      __m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
      __m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);
      __m128 vscaled89AB = _mm_cvtepi32_ps(vacc89AB);
      __m128 vscaledCDEF = _mm_cvtepi32_ps(vaccCDEF);

      const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
      vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
      vscaled4567 = _mm_mul_ps(vscaled4567, vscale);
      vscaled89AB = _mm_mul_ps(vscaled89AB, vscale);
      vscaledCDEF = _mm_mul_ps(vscaledCDEF, vscale);

      const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
      vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
      vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);
      vscaled89AB = _mm_min_ps(vscaled89AB, voutput_max_less_zero_point);
      vscaledCDEF = _mm_min_ps(vscaledCDEF, voutput_max_less_zero_point);

      vacc0123 = _mm_cvtps_epi32(vscaled0123);
      vacc4567 = _mm_cvtps_epi32(vscaled4567);
      vacc89AB = _mm_cvtps_epi32(vscaled89AB);
      vaccCDEF = _mm_cvtps_epi32(vscaledCDEF);

      // Add the output zero point with int16 saturation, narrow to int8 with
      // saturation, and apply the lower clamp on the packed int8 values.
      const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
      __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
      __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);

      __m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);

      const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse4.output_min);
      vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);

      _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
      output += 16;
    }
    if XNN_UNLIKELY(c != 0) {
      // Remainder path: process 8 channels at a time. Kernel bytes are read
      // through `k` with a stride of 16 per tap, because the weights remain
      // packed for 16 channels per tap even in the remainder.
      const int8_t* k = (const int8_t*) ((const int32_t*) w + 16);
      do {
        __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
        __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));

        const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
        const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
        const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) k);
        const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
        i0 += 8;

        __m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);

        const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
        const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
        const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) (k + 16));
        const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
        i1 += 8;

        vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi1x01234567, vxk1x01234567));

        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

        const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
        const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
        const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) (k + 32));
        const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
        i2 += 8;

        vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);

        const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
        const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
        const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) (k + 48));
        const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
        i3 += 8;

        vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi3x01234567, vxk3x01234567));

        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

        const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
        const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
        const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) (k + 64));
        const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
        i4 += 8;

        vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);

        const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
        const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(vi5x01234567);
        const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) (k + 80));
        const __m128i vxk5x01234567 = _mm_cvtepi8_epi16(vk5x01234567);
        i5 += 8;

        vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi5x01234567, vxk5x01234567));

        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

        const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
        const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(vi6x01234567);
        const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) (k + 96));
        const __m128i vxk6x01234567 = _mm_cvtepi8_epi16(vk6x01234567);
        i6 += 8;

        vprod01234567 = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);

        const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
        const __m128i vxi7x01234567 = _mm_cvtepi8_epi16(vi7x01234567);
        const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) (k + 112));
        const __m128i vxk7x01234567 = _mm_cvtepi8_epi16(vk7x01234567);
        i7 += 8;

        vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi7x01234567, vxk7x01234567));

        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

        const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
        const __m128i vxi8x01234567 = _mm_cvtepi8_epi16(vi8x01234567);
        const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) (k + 128));
        const __m128i vxk8x01234567 = _mm_cvtepi8_epi16(vk8x01234567);
        i8 += 8;

        vprod01234567 = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);

        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

        k += 8;

        // Same fp32 requantization as the main loop, on 8 channels.
        __m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
        __m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);

        const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
        vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
        vscaled4567 = _mm_mul_ps(vscaled4567, vscale);

        const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
        vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
        vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);

        vacc0123 = _mm_cvtps_epi32(vscaled0123);
        vacc4567 = _mm_cvtps_epi32(vscaled4567);

        w = (const void*) ((const int32_t*) w + 8);

        const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
        __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);

        __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
        vout0123456701234567 = _mm_max_epi8(vout0123456701234567, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));

        if XNN_LIKELY(c >= 8) {
          _mm_storel_epi64((__m128i*) output, vout0123456701234567);
          output += 8;
          c -= 8;
        } else {
          // Sub-8 tail: store 4/2/1 bytes, shifting consumed lanes out of the
          // vector between stores.
          if (c & 4) {
            unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
            vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
            output += 4;
          }
          if (c & 2) {
            unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
            vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
            output += 2;
          }
          if (c & 1) {
            *output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
            output += 1;
          }
          c = 0;
        }
      } while (c != 0);
    }

    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 21,325
| 47.248869
| 131
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-9p16c-minmax-fp32-sse41-mul16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-sse-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/unaligned.h>
// QS8 depthwise convolution, 9-tap unipass kernel, 16-channel tile, SSE4.1,
// 16-bit multiply variant, fp32 requantization with min/max clamping.
//
// Weight layout (per 16-channel group): 16 int32 biases followed by
// 9 taps x 16 int8 kernel values (144 bytes), i.e. a group stride of
// 16*sizeof(int32_t) + 144*sizeof(int8_t).
//
// params: fp32_sse4 variant — per-tensor scale, output zero point, and
// precomputed output_min / output_max_less_zero_point clamping constants.
// XNN_OOB_READS: tail loads read a full 8 bytes even when fewer channels
// remain; callers must guarantee those reads are safe.
void xnn_qs8_dwconv_minmax_fp32_ukernel_9p16c__sse41_mul16(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);
  do {
    // Set up the 9 input-row pointers for this output pixel. Rows equal to
    // `zero` are the shared zero buffer (padding) and must NOT be offset.
    const int8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
    }
    const int8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
    }
    const int8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
    }
    const int8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
    }
    const int8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
    }
    const int8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
    }
    const int8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
    }
    const int8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
    }
    const int8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
    }
    input = (const int8_t**) ((uintptr_t) input + input_stride);
    size_t c = channels;
    const void* w = weights;
    // Main loop: 16 channels per iteration, accumulated in four 4x int32 lanes.
    for (; c >= 16; c -= 16) {
      // Initialize accumulators with the 16 int32 biases at the start of the group.
      __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
      __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));
      __m128i vacc89AB = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 8));
      __m128i vaccCDEF = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 12));

      // Tap 0: widen int8 -> int16, 16-bit multiply, then sign-extend the
      // products to int32 (low half via cvtepi16_epi32, high half via
      // unpackhi+arithmetic shift) and accumulate. Taps 1..8 repeat this.
      const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
      const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
      const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 0 * sizeof(int8_t)));
      const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
      const __m128i vi0x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i0 + 8));
      const __m128i vxi0x89ABCDEF = _mm_cvtepi8_epi16(vi0x89ABCDEF);
      const __m128i vk0x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 8 * sizeof(int8_t)));
      const __m128i vxk0x89ABCDEF = _mm_cvtepi8_epi16(vk0x89ABCDEF);
      i0 += 16;

      // Products of two sign-extended int8 values fit in int16 without overflow.
      __m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
      __m128i vprod89ABCDEF = _mm_mullo_epi16(vxi0x89ABCDEF, vxk0x89ABCDEF);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
      vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
      vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));

      // Tap 1.
      const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
      const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
      const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 16 * sizeof(int8_t)));
      const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
      const __m128i vi1x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i1 + 8));
      const __m128i vxi1x89ABCDEF = _mm_cvtepi8_epi16(vi1x89ABCDEF);
      const __m128i vk1x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 24 * sizeof(int8_t)));
      const __m128i vxk1x89ABCDEF = _mm_cvtepi8_epi16(vk1x89ABCDEF);
      i1 += 16;

      vprod01234567 = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
      vprod89ABCDEF = _mm_mullo_epi16(vxi1x89ABCDEF, vxk1x89ABCDEF);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
      vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
      vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));

      // Tap 2.
      const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
      const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
      const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 32 * sizeof(int8_t)));
      const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
      const __m128i vi2x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i2 + 8));
      const __m128i vxi2x89ABCDEF = _mm_cvtepi8_epi16(vi2x89ABCDEF);
      const __m128i vk2x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 40 * sizeof(int8_t)));
      const __m128i vxk2x89ABCDEF = _mm_cvtepi8_epi16(vk2x89ABCDEF);
      i2 += 16;

      vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
      vprod89ABCDEF = _mm_mullo_epi16(vxi2x89ABCDEF, vxk2x89ABCDEF);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
      vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
      vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));

      // Tap 3.
      const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
      const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
      const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 48 * sizeof(int8_t)));
      const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
      const __m128i vi3x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i3 + 8));
      const __m128i vxi3x89ABCDEF = _mm_cvtepi8_epi16(vi3x89ABCDEF);
      const __m128i vk3x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 56 * sizeof(int8_t)));
      const __m128i vxk3x89ABCDEF = _mm_cvtepi8_epi16(vk3x89ABCDEF);
      i3 += 16;

      vprod01234567 = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
      vprod89ABCDEF = _mm_mullo_epi16(vxi3x89ABCDEF, vxk3x89ABCDEF);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
      vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
      vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));

      // Tap 4.
      const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
      const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
      const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 64 * sizeof(int8_t)));
      const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
      const __m128i vi4x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i4 + 8));
      const __m128i vxi4x89ABCDEF = _mm_cvtepi8_epi16(vi4x89ABCDEF);
      const __m128i vk4x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 72 * sizeof(int8_t)));
      const __m128i vxk4x89ABCDEF = _mm_cvtepi8_epi16(vk4x89ABCDEF);
      i4 += 16;

      vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
      vprod89ABCDEF = _mm_mullo_epi16(vxi4x89ABCDEF, vxk4x89ABCDEF);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
      vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
      vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));

      // Tap 5.
      const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
      const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(vi5x01234567);
      const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 80 * sizeof(int8_t)));
      const __m128i vxk5x01234567 = _mm_cvtepi8_epi16(vk5x01234567);
      const __m128i vi5x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i5 + 8));
      const __m128i vxi5x89ABCDEF = _mm_cvtepi8_epi16(vi5x89ABCDEF);
      const __m128i vk5x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 88 * sizeof(int8_t)));
      const __m128i vxk5x89ABCDEF = _mm_cvtepi8_epi16(vk5x89ABCDEF);
      i5 += 16;

      vprod01234567 = _mm_mullo_epi16(vxi5x01234567, vxk5x01234567);
      vprod89ABCDEF = _mm_mullo_epi16(vxi5x89ABCDEF, vxk5x89ABCDEF);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
      vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
      vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));

      // Tap 6.
      const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
      const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(vi6x01234567);
      const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 96 * sizeof(int8_t)));
      const __m128i vxk6x01234567 = _mm_cvtepi8_epi16(vk6x01234567);
      const __m128i vi6x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i6 + 8));
      const __m128i vxi6x89ABCDEF = _mm_cvtepi8_epi16(vi6x89ABCDEF);
      const __m128i vk6x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 104 * sizeof(int8_t)));
      const __m128i vxk6x89ABCDEF = _mm_cvtepi8_epi16(vk6x89ABCDEF);
      i6 += 16;

      vprod01234567 = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);
      vprod89ABCDEF = _mm_mullo_epi16(vxi6x89ABCDEF, vxk6x89ABCDEF);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
      vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
      vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));

      // Tap 7.
      const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
      const __m128i vxi7x01234567 = _mm_cvtepi8_epi16(vi7x01234567);
      const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 112 * sizeof(int8_t)));
      const __m128i vxk7x01234567 = _mm_cvtepi8_epi16(vk7x01234567);
      const __m128i vi7x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i7 + 8));
      const __m128i vxi7x89ABCDEF = _mm_cvtepi8_epi16(vi7x89ABCDEF);
      const __m128i vk7x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 120 * sizeof(int8_t)));
      const __m128i vxk7x89ABCDEF = _mm_cvtepi8_epi16(vk7x89ABCDEF);
      i7 += 16;

      vprod01234567 = _mm_mullo_epi16(vxi7x01234567, vxk7x01234567);
      vprod89ABCDEF = _mm_mullo_epi16(vxi7x89ABCDEF, vxk7x89ABCDEF);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
      vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
      vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));

      // Tap 8.
      const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
      const __m128i vxi8x01234567 = _mm_cvtepi8_epi16(vi8x01234567);
      const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 128 * sizeof(int8_t)));
      const __m128i vxk8x01234567 = _mm_cvtepi8_epi16(vk8x01234567);
      const __m128i vi8x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i8 + 8));
      const __m128i vxi8x89ABCDEF = _mm_cvtepi8_epi16(vi8x89ABCDEF);
      const __m128i vk8x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 136 * sizeof(int8_t)));
      const __m128i vxk8x89ABCDEF = _mm_cvtepi8_epi16(vk8x89ABCDEF);
      i8 += 16;

      vprod01234567 = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);
      vprod89ABCDEF = _mm_mullo_epi16(vxi8x89ABCDEF, vxk8x89ABCDEF);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
      vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
      vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));

      // Advance past this 16-channel weight group (biases + 9x16 kernel bytes).
      w = (const void*) ((uintptr_t) w + 16 * sizeof(int32_t) + 144 * sizeof(int8_t));

      // Requantize: int32 -> float, multiply by scale, clamp above in the
      // float domain, then convert back with round-to-nearest-even.
      __m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
      __m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);
      __m128 vscaled89AB = _mm_cvtepi32_ps(vacc89AB);
      __m128 vscaledCDEF = _mm_cvtepi32_ps(vaccCDEF);

      const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
      vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
      vscaled4567 = _mm_mul_ps(vscaled4567, vscale);
      vscaled89AB = _mm_mul_ps(vscaled89AB, vscale);
      vscaledCDEF = _mm_mul_ps(vscaledCDEF, vscale);

      const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
      vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
      vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);
      vscaled89AB = _mm_min_ps(vscaled89AB, voutput_max_less_zero_point);
      vscaledCDEF = _mm_min_ps(vscaledCDEF, voutput_max_less_zero_point);

      vacc0123 = _mm_cvtps_epi32(vscaled0123);
      vacc4567 = _mm_cvtps_epi32(vscaled4567);
      vacc89AB = _mm_cvtps_epi32(vscaled89AB);
      vaccCDEF = _mm_cvtps_epi32(vscaledCDEF);

      // Add the output zero point with int16 saturation, then narrow to int8
      // (packs saturates) and apply the lower clamp.
      const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
      __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
      __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);

      __m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);

      const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse4.output_min);
      vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);

      _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
      output += 16;
    }
    // Remainder: up to 15 channels left, processed 8 at a time. `k` points at
    // the kernel bytes of the current group; tap n lives at k + n*16 because
    // the kernel is interleaved with a 16-channel stride.
    if XNN_UNLIKELY(c != 0) {
      const int8_t* k = (const int8_t*) ((const int32_t*) w + 16);
      do {
        // Biases for these 8 channels; w advances 8 int32 per iteration so the
        // second pass (when 8 < c < 16) picks up biases 8..15.
        __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
        __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));

        // Same widen-multiply-accumulate pattern as the main loop, 8 lanes.
        const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
        const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
        const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) k);
        const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
        i0 += 8;

        __m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);

        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

        const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
        const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
        const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) (k + 16));
        const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
        i1 += 8;

        vprod01234567 = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);

        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

        const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
        const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
        const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) (k + 32));
        const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
        i2 += 8;

        vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);

        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

        const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
        const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
        const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) (k + 48));
        const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
        i3 += 8;

        vprod01234567 = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);

        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

        const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
        const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
        const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) (k + 64));
        const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
        i4 += 8;

        vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);

        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

        const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
        const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(vi5x01234567);
        const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) (k + 80));
        const __m128i vxk5x01234567 = _mm_cvtepi8_epi16(vk5x01234567);
        i5 += 8;

        vprod01234567 = _mm_mullo_epi16(vxi5x01234567, vxk5x01234567);

        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

        const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
        const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(vi6x01234567);
        const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) (k + 96));
        const __m128i vxk6x01234567 = _mm_cvtepi8_epi16(vk6x01234567);
        i6 += 8;

        vprod01234567 = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);

        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

        const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
        const __m128i vxi7x01234567 = _mm_cvtepi8_epi16(vi7x01234567);
        const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) (k + 112));
        const __m128i vxk7x01234567 = _mm_cvtepi8_epi16(vk7x01234567);
        i7 += 8;

        vprod01234567 = _mm_mullo_epi16(vxi7x01234567, vxk7x01234567);

        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

        const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
        const __m128i vxi8x01234567 = _mm_cvtepi8_epi16(vi8x01234567);
        const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) (k + 128));
        const __m128i vxk8x01234567 = _mm_cvtepi8_epi16(vk8x01234567);
        i8 += 8;

        vprod01234567 = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);

        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

        k += 8;

        // Same fp32 requantization path as the main loop, 8 lanes.
        __m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
        __m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);

        const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
        vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
        vscaled4567 = _mm_mul_ps(vscaled4567, vscale);

        const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
        vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
        vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);

        vacc0123 = _mm_cvtps_epi32(vscaled0123);
        vacc4567 = _mm_cvtps_epi32(vscaled4567);

        w = (const void*) ((const int32_t*) w + 8);

        const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
        __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);

        __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
        vout0123456701234567 = _mm_max_epi8(vout0123456701234567, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));

        if XNN_LIKELY(c >= 8) {
          _mm_storel_epi64((__m128i*) output, vout0123456701234567);
          output += 8;
          c -= 8;
        } else {
          // Tail store: emit 4/2/1 bytes, shifting consumed lanes out.
          if (c & 4) {
            unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
            vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
            output += 4;
          }
          if (c & 2) {
            unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
            vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
            output += 2;
          }
          if (c & 1) {
            *output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
            output += 1;
          }
          c = 0;
        }
      } while (c != 0);
    }

    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 23,243
| 48.879828
| 131
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-9p16c-minmax-fp32-xop-mul16-add16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-sse-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/dwconv.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_dwconv_minmax_fp32_ukernel_9p16c__xop_mul16_add16(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
const int8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
size_t c = channels;
const void* w = weights;
for (; c >= 16; c -= 16) {
__m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));
__m128i vacc89AB = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 8));
__m128i vaccCDEF = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 12));
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 0 * sizeof(int8_t)));
const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
const __m128i vi0x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i0 + 8));
const __m128i vxi0x89ABCDEF = _mm_cvtepi8_epi16(vi0x89ABCDEF);
const __m128i vk0x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 8 * sizeof(int8_t)));
const __m128i vxk0x89ABCDEF = _mm_cvtepi8_epi16(vk0x89ABCDEF);
i0 += 16;
__m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
__m128i vprod89ABCDEF = _mm_mullo_epi16(vxi0x89ABCDEF, vxk0x89ABCDEF);
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 16 * sizeof(int8_t)));
const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
const __m128i vi1x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i1 + 8));
const __m128i vxi1x89ABCDEF = _mm_cvtepi8_epi16(vi1x89ABCDEF);
const __m128i vk1x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 24 * sizeof(int8_t)));
const __m128i vxk1x89ABCDEF = _mm_cvtepi8_epi16(vk1x89ABCDEF);
i1 += 16;
vprod01234567 = _mm_macc_epi16(vxi1x01234567, vxk1x01234567, vprod01234567);
vprod89ABCDEF = _mm_macc_epi16(vxi1x89ABCDEF, vxk1x89ABCDEF, vprod89ABCDEF);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 32 * sizeof(int8_t)));
const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
const __m128i vi2x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i2 + 8));
const __m128i vxi2x89ABCDEF = _mm_cvtepi8_epi16(vi2x89ABCDEF);
const __m128i vk2x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 40 * sizeof(int8_t)));
const __m128i vxk2x89ABCDEF = _mm_cvtepi8_epi16(vk2x89ABCDEF);
i2 += 16;
vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
vprod89ABCDEF = _mm_mullo_epi16(vxi2x89ABCDEF, vxk2x89ABCDEF);
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 48 * sizeof(int8_t)));
const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
const __m128i vi3x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i3 + 8));
const __m128i vxi3x89ABCDEF = _mm_cvtepi8_epi16(vi3x89ABCDEF);
const __m128i vk3x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 56 * sizeof(int8_t)));
const __m128i vxk3x89ABCDEF = _mm_cvtepi8_epi16(vk3x89ABCDEF);
i3 += 16;
vprod01234567 = _mm_macc_epi16(vxi3x01234567, vxk3x01234567, vprod01234567);
vprod89ABCDEF = _mm_macc_epi16(vxi3x89ABCDEF, vxk3x89ABCDEF, vprod89ABCDEF);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 64 * sizeof(int8_t)));
const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
const __m128i vi4x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i4 + 8));
const __m128i vxi4x89ABCDEF = _mm_cvtepi8_epi16(vi4x89ABCDEF);
const __m128i vk4x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 72 * sizeof(int8_t)));
const __m128i vxk4x89ABCDEF = _mm_cvtepi8_epi16(vk4x89ABCDEF);
i4 += 16;
vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
vprod89ABCDEF = _mm_mullo_epi16(vxi4x89ABCDEF, vxk4x89ABCDEF);
const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(vi5x01234567);
const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 80 * sizeof(int8_t)));
const __m128i vxk5x01234567 = _mm_cvtepi8_epi16(vk5x01234567);
const __m128i vi5x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i5 + 8));
const __m128i vxi5x89ABCDEF = _mm_cvtepi8_epi16(vi5x89ABCDEF);
const __m128i vk5x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 88 * sizeof(int8_t)));
const __m128i vxk5x89ABCDEF = _mm_cvtepi8_epi16(vk5x89ABCDEF);
i5 += 16;
vprod01234567 = _mm_macc_epi16(vxi5x01234567, vxk5x01234567, vprod01234567);
vprod89ABCDEF = _mm_macc_epi16(vxi5x89ABCDEF, vxk5x89ABCDEF, vprod89ABCDEF);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(vi6x01234567);
const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 96 * sizeof(int8_t)));
const __m128i vxk6x01234567 = _mm_cvtepi8_epi16(vk6x01234567);
const __m128i vi6x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i6 + 8));
const __m128i vxi6x89ABCDEF = _mm_cvtepi8_epi16(vi6x89ABCDEF);
const __m128i vk6x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 104 * sizeof(int8_t)));
const __m128i vxk6x89ABCDEF = _mm_cvtepi8_epi16(vk6x89ABCDEF);
i6 += 16;
vprod01234567 = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);
vprod89ABCDEF = _mm_mullo_epi16(vxi6x89ABCDEF, vxk6x89ABCDEF);
const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
const __m128i vxi7x01234567 = _mm_cvtepi8_epi16(vi7x01234567);
const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 112 * sizeof(int8_t)));
const __m128i vxk7x01234567 = _mm_cvtepi8_epi16(vk7x01234567);
const __m128i vi7x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i7 + 8));
const __m128i vxi7x89ABCDEF = _mm_cvtepi8_epi16(vi7x89ABCDEF);
const __m128i vk7x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 120 * sizeof(int8_t)));
const __m128i vxk7x89ABCDEF = _mm_cvtepi8_epi16(vk7x89ABCDEF);
i7 += 16;
vprod01234567 = _mm_macc_epi16(vxi7x01234567, vxk7x01234567, vprod01234567);
vprod89ABCDEF = _mm_macc_epi16(vxi7x89ABCDEF, vxk7x89ABCDEF, vprod89ABCDEF);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
const __m128i vxi8x01234567 = _mm_cvtepi8_epi16(vi8x01234567);
const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 128 * sizeof(int8_t)));
const __m128i vxk8x01234567 = _mm_cvtepi8_epi16(vk8x01234567);
const __m128i vi8x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i8 + 8));
const __m128i vxi8x89ABCDEF = _mm_cvtepi8_epi16(vi8x89ABCDEF);
const __m128i vk8x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 136 * sizeof(int8_t)));
const __m128i vxk8x89ABCDEF = _mm_cvtepi8_epi16(vk8x89ABCDEF);
i8 += 16;
vprod01234567 = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);
vprod89ABCDEF = _mm_mullo_epi16(vxi8x89ABCDEF, vxk8x89ABCDEF);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
w = (const void*) ((uintptr_t) w + 16 * sizeof(int32_t) + 144 * sizeof(int8_t));
__m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);
__m128 vscaled89AB = _mm_cvtepi32_ps(vacc89AB);
__m128 vscaledCDEF = _mm_cvtepi32_ps(vaccCDEF);
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
vscaled4567 = _mm_mul_ps(vscaled4567, vscale);
vscaled89AB = _mm_mul_ps(vscaled89AB, vscale);
vscaledCDEF = _mm_mul_ps(vscaledCDEF, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);
vscaled89AB = _mm_min_ps(vscaled89AB, voutput_max_less_zero_point);
vscaledCDEF = _mm_min_ps(vscaledCDEF, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vscaled0123);
vacc4567 = _mm_cvtps_epi32(vscaled4567);
vacc89AB = _mm_cvtps_epi32(vscaled89AB);
vaccCDEF = _mm_cvtps_epi32(vscaledCDEF);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse4.output_min);
vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(c != 0) {
const int8_t* k = (const int8_t*) ((const int32_t*) w + 16);
do {
__m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) k);
const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
i0 += 8;
__m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) (k + 16));
const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
i1 += 8;
vprod01234567 = _mm_macc_epi16(vxi1x01234567, vxk1x01234567, vprod01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) (k + 32));
const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
i2 += 8;
vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) (k + 48));
const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
i3 += 8;
vprod01234567 = _mm_macc_epi16(vxi3x01234567, vxk3x01234567, vprod01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) (k + 64));
const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
i4 += 8;
vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(vi5x01234567);
const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) (k + 80));
const __m128i vxk5x01234567 = _mm_cvtepi8_epi16(vk5x01234567);
i5 += 8;
vprod01234567 = _mm_macc_epi16(vxi5x01234567, vxk5x01234567, vprod01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(vi6x01234567);
const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) (k + 96));
const __m128i vxk6x01234567 = _mm_cvtepi8_epi16(vk6x01234567);
i6 += 8;
vprod01234567 = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);
const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
const __m128i vxi7x01234567 = _mm_cvtepi8_epi16(vi7x01234567);
const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) (k + 112));
const __m128i vxk7x01234567 = _mm_cvtepi8_epi16(vk7x01234567);
i7 += 8;
vprod01234567 = _mm_macc_epi16(vxi7x01234567, vxk7x01234567, vprod01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
const __m128i vxi8x01234567 = _mm_cvtepi8_epi16(vi8x01234567);
const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) (k + 128));
const __m128i vxk8x01234567 = _mm_cvtepi8_epi16(vk8x01234567);
i8 += 8;
vprod01234567 = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
k += 8;
__m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
vscaled4567 = _mm_mul_ps(vscaled4567, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vscaled0123);
vacc4567 = _mm_cvtps_epi32(vscaled4567);
w = (const void*) ((const int32_t*) w + 8);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if XNN_LIKELY(c >= 8) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
c -= 8;
} else {
if (c & 4) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (c & 2) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (c & 1) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
output += 1;
}
c = 0;
}
} while (c != 0);
}
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 21,184
| 46.5
| 131
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-9p16c-minmax-rndnu-neon-mla8-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-neon-mul8.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
// QS8 depthwise convolution microkernel: 9-tap unipass ("9p"), 16 channels per
// main-loop iteration ("16c"), NEON int8 multiply/multiply-accumulate ("mla8")
// with 128-bit kernel/input loads ("ld128"), rndnu requantization.
//
// Layout of `weights` per 16-channel group: 16 x int32 bias followed by
// 9 taps x 16 x int8 kernel values.  Rows equal to `zero` point at the shared
// zero buffer and must not be displaced by `input_offset`.
//
// Fix: the corrupted token sequence "¶ms" (mojibake of "&params", where
// "&para" was decoded as U+00B6) is restored to valid C in the six parameter
// loads below; without this the translation unit does not compile.
void xnn_qs8_dwconv_minmax_rndnu_ukernel_9p16c__neon_mla8_ld128(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);

  // Broadcast rndnu requantization constants and the output clamping bounds.
  const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
  const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
  const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
  const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
  const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
  do {
    // Resolve the 9 input row pointers for this output pixel.  A row equal to
    // `zero` is the shared zero buffer and is used as-is (no input_offset).
    const int8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
    }
    const int8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
    }
    const int8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
    }
    const int8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
    }
    const int8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
    }
    const int8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
    }
    const int8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
    }
    const int8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
    }
    const int8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
    }
    input = (const int8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
    // Main loop: 16 channels per iteration.
    for (; c >= 16; c -= 16) {
      // Per-channel int32 bias.
      int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
      int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
      int32x4_t vacc89AB = vld1q_s32(w); w = (const int32_t*) w + 4;
      int32x4_t vaccCDEF = vld1q_s32(w); w = (const int32_t*) w + 4;

      // Taps are processed in pairs: vmull_s8 starts a fresh int16 product for
      // the even tap, vmlal_s8 folds in the odd tap (sums of two int8*int8
      // products fit int16), then both halves are widened into the int32
      // accumulators.
      const int8x16_t vi0x0123456789ABCDEF = vld1q_s8(i0); i0 += 16;
      const int8x16_t vk0x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;

      int16x8_t vprod01234567 = vmull_s8(vget_low_s8(vi0x0123456789ABCDEF), vget_low_s8(vk0x0123456789ABCDEF));
      int16x8_t vprod89ABCDEF = vmull_s8(vget_high_s8(vi0x0123456789ABCDEF), vget_high_s8(vk0x0123456789ABCDEF));

      const int8x16_t vi1x0123456789ABCDEF = vld1q_s8(i1); i1 += 16;
      const int8x16_t vk1x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;

      vprod01234567 = vmlal_s8(vprod01234567, vget_low_s8(vi1x0123456789ABCDEF), vget_low_s8(vk1x0123456789ABCDEF));
      vprod89ABCDEF = vmlal_s8(vprod89ABCDEF, vget_high_s8(vi1x0123456789ABCDEF), vget_high_s8(vk1x0123456789ABCDEF));

      vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
      vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
      vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
      vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
      const int8x16_t vi2x0123456789ABCDEF = vld1q_s8(i2); i2 += 16;
      const int8x16_t vk2x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;

      vprod01234567 = vmull_s8(vget_low_s8(vi2x0123456789ABCDEF), vget_low_s8(vk2x0123456789ABCDEF));
      vprod89ABCDEF = vmull_s8(vget_high_s8(vi2x0123456789ABCDEF), vget_high_s8(vk2x0123456789ABCDEF));

      const int8x16_t vi3x0123456789ABCDEF = vld1q_s8(i3); i3 += 16;
      const int8x16_t vk3x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;

      vprod01234567 = vmlal_s8(vprod01234567, vget_low_s8(vi3x0123456789ABCDEF), vget_low_s8(vk3x0123456789ABCDEF));
      vprod89ABCDEF = vmlal_s8(vprod89ABCDEF, vget_high_s8(vi3x0123456789ABCDEF), vget_high_s8(vk3x0123456789ABCDEF));

      vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
      vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
      vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
      vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
      const int8x16_t vi4x0123456789ABCDEF = vld1q_s8(i4); i4 += 16;
      const int8x16_t vk4x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;

      vprod01234567 = vmull_s8(vget_low_s8(vi4x0123456789ABCDEF), vget_low_s8(vk4x0123456789ABCDEF));
      vprod89ABCDEF = vmull_s8(vget_high_s8(vi4x0123456789ABCDEF), vget_high_s8(vk4x0123456789ABCDEF));

      const int8x16_t vi5x0123456789ABCDEF = vld1q_s8(i5); i5 += 16;
      const int8x16_t vk5x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;

      vprod01234567 = vmlal_s8(vprod01234567, vget_low_s8(vi5x0123456789ABCDEF), vget_low_s8(vk5x0123456789ABCDEF));
      vprod89ABCDEF = vmlal_s8(vprod89ABCDEF, vget_high_s8(vi5x0123456789ABCDEF), vget_high_s8(vk5x0123456789ABCDEF));

      vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
      vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
      vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
      vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
      const int8x16_t vi6x0123456789ABCDEF = vld1q_s8(i6); i6 += 16;
      const int8x16_t vk6x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;

      vprod01234567 = vmull_s8(vget_low_s8(vi6x0123456789ABCDEF), vget_low_s8(vk6x0123456789ABCDEF));
      vprod89ABCDEF = vmull_s8(vget_high_s8(vi6x0123456789ABCDEF), vget_high_s8(vk6x0123456789ABCDEF));

      const int8x16_t vi7x0123456789ABCDEF = vld1q_s8(i7); i7 += 16;
      const int8x16_t vk7x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;

      vprod01234567 = vmlal_s8(vprod01234567, vget_low_s8(vi7x0123456789ABCDEF), vget_low_s8(vk7x0123456789ABCDEF));
      vprod89ABCDEF = vmlal_s8(vprod89ABCDEF, vget_high_s8(vi7x0123456789ABCDEF), vget_high_s8(vk7x0123456789ABCDEF));

      vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
      vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
      vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
      vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
      // Tap 8 has no pair partner; it is widened on its own.
      const int8x16_t vi8x0123456789ABCDEF = vld1q_s8(i8); i8 += 16;
      const int8x16_t vk8x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;

      vprod01234567 = vmull_s8(vget_low_s8(vi8x0123456789ABCDEF), vget_low_s8(vk8x0123456789ABCDEF));
      vprod89ABCDEF = vmull_s8(vget_high_s8(vi8x0123456789ABCDEF), vget_high_s8(vk8x0123456789ABCDEF));

      vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
      vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
      vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
      vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));

      // rndnu requantization: saturating pre-shift, doubling-high multiply,
      // rounding post-shift.
      vacc0123 = vqshlq_s32(vacc0123, vright_pre_shift);
      vacc4567 = vqshlq_s32(vacc4567, vright_pre_shift);
      vacc89AB = vqshlq_s32(vacc89AB, vright_pre_shift);
      vaccCDEF = vqshlq_s32(vaccCDEF, vright_pre_shift);

      vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
      vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
      vacc89AB = vqdmulhq_s32(vacc89AB, vmultiplier);
      vaccCDEF = vqdmulhq_s32(vaccCDEF, vmultiplier);

      vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
      vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);
      vacc89AB = vrshlq_s32(vacc89AB, vright_post_shift);
      vaccCDEF = vrshlq_s32(vaccCDEF, vright_post_shift);

      // Saturating narrow int32 -> int16, add zero point, narrow to int8.
#if XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
      int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);

      vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
      vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);

      int8x16_t vout0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc01234567), vacc89ABCDEF);
#else  // !XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
      int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));

      vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
      vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);

      int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
#endif  // !XNN_ARCH_ARM64

      // Clamp to [output_min, output_max] and store 16 outputs.
      vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);

      vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);

      vst1q_s8(output, vout0123456789ABCDEF); output += 16;
    }
    // Remainder: up to 15 channels, processed 8 at a time.  `w` still walks
    // the bias array; `k` indexes the int8 kernel values, whose taps are
    // spaced 16 bytes apart (full channel-group stride).
    if XNN_UNLIKELY(c != 0) {
      const int8_t* k = (const int8_t*) ((const int32_t*) w + 16);
      do {
        int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
        int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;

        const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
        const int8x8_t vk0x01234567 = vld1_s8(k); k += 8;

        int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);

        const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
        const int8x8_t vk1x01234567 = vld1_s8((const void*) (k + 8));

        vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);

        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
        const int8x8_t vk2x01234567 = vld1_s8((const void*) (k + 24));

        vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);

        const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
        const int8x8_t vk3x01234567 = vld1_s8((const void*) (k + 40));

        vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);

        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
        const int8x8_t vk4x01234567 = vld1_s8((const void*) (k + 56));

        vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);

        const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
        const int8x8_t vk5x01234567 = vld1_s8((const void*) (k + 72));

        vprod01234567 = vmlal_s8(vprod01234567, vi5x01234567, vk5x01234567);

        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
        const int8x8_t vk6x01234567 = vld1_s8((const void*) (k + 88));

        vprod01234567 = vmull_s8(vi6x01234567, vk6x01234567);

        const int8x8_t vi7x01234567 = vld1_s8(i7); i7 += 8;
        const int8x8_t vk7x01234567 = vld1_s8((const void*) (k + 104));

        vprod01234567 = vmlal_s8(vprod01234567, vi7x01234567, vk7x01234567);

        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi8x01234567 = vld1_s8(i8); i8 += 8;
        const int8x8_t vk8x01234567 = vld1_s8((const void*) (k + 120));

        vprod01234567 = vmull_s8(vi8x01234567, vk8x01234567);

        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));

        vacc0123 = vqshlq_s32(vacc0123, vright_pre_shift);
        vacc4567 = vqshlq_s32(vacc4567, vright_pre_shift);

        vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
        vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);

        vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
        vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);

#if XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
        int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
        vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);

        int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
        vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
        vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));

        if XNN_LIKELY(c >= 8) {
          vst1_s8(output, vout01234567); output += 8;
          c -= 8;
        } else {
          // Final sub-8 tail: store 4/2/1 lanes, rotating the vector so the
          // next lane to store is always lane 0.
          if (c & 4) {
            vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
            vout01234567 = vext_s8(vout01234567, vout01234567, 4);
          }
          if (c & 2) {
            vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
            vout01234567 = vext_s8(vout01234567, vout01234567, 2);
          }
          if (c & 1) {
            vst1_lane_s8(output, vout01234567, 0); output += 1;
          }
          c = 0;
        }
      } while (c != 0);
    }

    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 14,096
| 44.038339
| 118
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-9p16c-minmax-rndnu-neon-mla8-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-neon-mul8.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
void xnn_qs8_dwconv_minmax_rndnu_ukernel_9p16c__neon_mla8_ld64(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const int32x4_t vright_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->rndnu_neon.output_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->rndnu_neon.output_max);
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
const int8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
size_t c = channels;
const void* w = weights;
for (; c >= 16; c -= 16) {
int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc89AB = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vaccCDEF = vld1q_s32(w); w = (const int32_t*) w + 4;
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi0x89ABCDEF = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
int16x8_t vprod89ABCDEF = vmull_s8(vi0x89ABCDEF, vk0x89ABCDEF);
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi1x89ABCDEF = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vprod89ABCDEF = vmlal_s8(vprod89ABCDEF, vi1x89ABCDEF, vk1x89ABCDEF);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi2x89ABCDEF = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
vprod89ABCDEF = vmull_s8(vi2x89ABCDEF, vk2x89ABCDEF);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi3x89ABCDEF = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vprod89ABCDEF = vmlal_s8(vprod89ABCDEF, vi3x89ABCDEF, vk3x89ABCDEF);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
vprod89ABCDEF = vmull_s8(vi4x89ABCDEF, vk4x89ABCDEF);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi5x89ABCDEF = vld1_s8(i5); i5 += 8;
const int8x8_t vk5x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi5x01234567, vk5x01234567);
vprod89ABCDEF = vmlal_s8(vprod89ABCDEF, vi5x89ABCDEF, vk5x89ABCDEF);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
const int8x8_t vk6x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi6x89ABCDEF = vld1_s8(i6); i6 += 8;
const int8x8_t vk6x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi6x01234567, vk6x01234567);
vprod89ABCDEF = vmull_s8(vi6x89ABCDEF, vk6x89ABCDEF);
const int8x8_t vi7x01234567 = vld1_s8(i7); i7 += 8;
const int8x8_t vk7x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi7x89ABCDEF = vld1_s8(i7); i7 += 8;
const int8x8_t vk7x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi7x01234567, vk7x01234567);
vprod89ABCDEF = vmlal_s8(vprod89ABCDEF, vi7x89ABCDEF, vk7x89ABCDEF);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
const int8x8_t vi8x01234567 = vld1_s8(i8); i8 += 8;
const int8x8_t vk8x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vi8x89ABCDEF = vld1_s8(i8); i8 += 8;
const int8x8_t vk8x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi8x01234567, vk8x01234567);
vprod89ABCDEF = vmull_s8(vi8x89ABCDEF, vk8x89ABCDEF);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
vacc0123 = vqshlq_s32(vacc0123, vright_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vright_pre_shift);
vacc89AB = vqshlq_s32(vacc89AB, vright_pre_shift);
vaccCDEF = vqshlq_s32(vaccCDEF, vright_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc89AB = vqdmulhq_s32(vacc89AB, vmultiplier);
vaccCDEF = vqdmulhq_s32(vaccCDEF, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);
vacc89AB = vrshlq_s32(vacc89AB, vright_post_shift);
vaccCDEF = vrshlq_s32(vaccCDEF, vright_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
int8x16_t vout0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc01234567), vacc89ABCDEF);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
#endif // !XNN_ARCH_ARM64
vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
vst1q_s8(output, vout0123456789ABCDEF); output += 16;
}
if XNN_UNLIKELY(c != 0) {
const int8_t* k = (const int8_t*) ((const int32_t*) w + 16);
do {
int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(k); k += 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8((const void*) (k + 8));
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8((const void*) (k + 24));
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8((const void*) (k + 40));
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8((const void*) (k + 56));
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
const int8x8_t vk5x01234567 = vld1_s8((const void*) (k + 72));
vprod01234567 = vmlal_s8(vprod01234567, vi5x01234567, vk5x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
const int8x8_t vk6x01234567 = vld1_s8((const void*) (k + 88));
vprod01234567 = vmull_s8(vi6x01234567, vk6x01234567);
const int8x8_t vi7x01234567 = vld1_s8(i7); i7 += 8;
const int8x8_t vk7x01234567 = vld1_s8((const void*) (k + 104));
vprod01234567 = vmlal_s8(vprod01234567, vi7x01234567, vk7x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi8x01234567 = vld1_s8(i8); i8 += 8;
const int8x8_t vk8x01234567 = vld1_s8((const void*) (k + 120));
vprod01234567 = vmull_s8(vi8x01234567, vk8x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc0123 = vqshlq_s32(vacc0123, vright_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vright_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));
if XNN_LIKELY(c >= 8) {
vst1_s8(output, vout01234567); output += 8;
c -= 8;
} else {
if (c & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (c & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (c & 1) {
vst1_lane_s8(output, vout01234567, 0); output += 1;
}
c = 0;
}
} while (c != 0);
}
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 14,320
| 42.265861
| 103
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-9p16c-minmax-rndnu-neon-mul16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-neon-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
// QS8 depthwise-convolution microkernel: 9-tap unipass ("9p"), 16 channels
// per main-loop iteration ("16c"), rndnu requantization, NEON "mul16"
// variant (inputs/weights widened to int16 and accumulated with vmlal_s16).
//
// Fix applied: the extraction had corrupted every `&params` into the
// mis-encoded sequence `¶ms` (HTML-entity damage); restored to valid C.
//
// channels         - number of channels per output pixel (> 0).
// output_width     - number of output pixels to produce (> 0).
// input            - per-pixel array of 9 input-row pointers.
// weights          - packed layout: [16 x int32 bias][9 taps x 16 x int8],
//                    repeated per 16-channel group (remainder uses 8-wide
//                    reads with fixed per-tap offsets into the last group).
// output           - destination for quantized int8 results.
// input_stride     - byte stride between consecutive pointer groups in input.
// output_increment - bytes added to output after each output pixel.
// input_offset     - byte offset applied to every input pointer except `zero`.
// zero             - padding row; pointers equal to it are not offset.
// params           - rndnu requantization constants (pre/post shifts,
//                    multiplier, output zero point, clamping bounds).
void xnn_qs8_dwconv_minmax_rndnu_ukernel_9p16c__neon_mul16(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);
  // Broadcast all requantization constants once, outside the pixel loop.
  const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
  const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
  const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
  const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
  const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
  do {
    // Set up the 9 tap row pointers; rows that point at `zero` (padding)
    // are deliberately NOT offset by input_offset.
    const int8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
    }
    const int8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
    }
    const int8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
    }
    const int8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
    }
    const int8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
    }
    const int8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
    }
    const int8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
    }
    const int8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
    }
    const int8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
    }
    input = (const int8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
    // Main loop: 16 channels per iteration. Load int32 biases, then for each
    // of the 9 taps widen 8+8 int8 lanes to int16 and multiply-accumulate.
    for (; c >= 16; c -= 16) {
      int32x4_t vacc0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
      int32x4_t vacc4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
      int32x4_t vacc89AB = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
      int32x4_t vaccCDEF = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);

      // Tap 0: widen input and kernel bytes to int16, accumulate into int32.
      const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
      const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
      const int16x8_t vi0x89ABCDEF = vmovl_s8(vld1_s8(i0)); i0 += 8;
      const int16x8_t vk0x89ABCDEF = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
      vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi0x89ABCDEF), vget_low_s16(vk0x89ABCDEF));
      vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi0x89ABCDEF), vget_high_s16(vk0x89ABCDEF));
      // Taps 1-8 follow the identical pattern.
      const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
      const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
      const int16x8_t vi1x89ABCDEF = vmovl_s8(vld1_s8(i1)); i1 += 8;
      const int16x8_t vk1x89ABCDEF = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
      vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi1x89ABCDEF), vget_low_s16(vk1x89ABCDEF));
      vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi1x89ABCDEF), vget_high_s16(vk1x89ABCDEF));
      const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
      const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
      const int16x8_t vi2x89ABCDEF = vmovl_s8(vld1_s8(i2)); i2 += 8;
      const int16x8_t vk2x89ABCDEF = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
      vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi2x89ABCDEF), vget_low_s16(vk2x89ABCDEF));
      vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi2x89ABCDEF), vget_high_s16(vk2x89ABCDEF));
      const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
      const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
      const int16x8_t vi3x89ABCDEF = vmovl_s8(vld1_s8(i3)); i3 += 8;
      const int16x8_t vk3x89ABCDEF = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
      vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi3x89ABCDEF), vget_low_s16(vk3x89ABCDEF));
      vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi3x89ABCDEF), vget_high_s16(vk3x89ABCDEF));
      const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
      const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
      const int16x8_t vi4x89ABCDEF = vmovl_s8(vld1_s8(i4)); i4 += 8;
      const int16x8_t vk4x89ABCDEF = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
      vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi4x89ABCDEF), vget_low_s16(vk4x89ABCDEF));
      vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi4x89ABCDEF), vget_high_s16(vk4x89ABCDEF));
      const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5)); i5 += 8;
      const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
      const int16x8_t vi5x89ABCDEF = vmovl_s8(vld1_s8(i5)); i5 += 8;
      const int16x8_t vk5x89ABCDEF = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
      vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi5x89ABCDEF), vget_low_s16(vk5x89ABCDEF));
      vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi5x89ABCDEF), vget_high_s16(vk5x89ABCDEF));
      const int16x8_t vi6x01234567 = vmovl_s8(vld1_s8(i6)); i6 += 8;
      const int16x8_t vk6x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
      const int16x8_t vi6x89ABCDEF = vmovl_s8(vld1_s8(i6)); i6 += 8;
      const int16x8_t vk6x89ABCDEF = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
      vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi6x89ABCDEF), vget_low_s16(vk6x89ABCDEF));
      vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi6x89ABCDEF), vget_high_s16(vk6x89ABCDEF));
      const int16x8_t vi7x01234567 = vmovl_s8(vld1_s8(i7)); i7 += 8;
      const int16x8_t vk7x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
      const int16x8_t vi7x89ABCDEF = vmovl_s8(vld1_s8(i7)); i7 += 8;
      const int16x8_t vk7x89ABCDEF = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));
      vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi7x89ABCDEF), vget_low_s16(vk7x89ABCDEF));
      vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi7x89ABCDEF), vget_high_s16(vk7x89ABCDEF));
      const int16x8_t vi8x01234567 = vmovl_s8(vld1_s8(i8)); i8 += 8;
      const int16x8_t vk8x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
      const int16x8_t vi8x89ABCDEF = vmovl_s8(vld1_s8(i8)); i8 += 8;
      const int16x8_t vk8x89ABCDEF = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi8x01234567), vget_low_s16(vk8x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi8x01234567), vget_high_s16(vk8x01234567));
      vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi8x89ABCDEF), vget_low_s16(vk8x89ABCDEF));
      vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi8x89ABCDEF), vget_high_s16(vk8x89ABCDEF));

      // rndnu requantization: saturating pre-shift, doubling-high multiply,
      // rounding post-shift.
      vacc0123 = vqshlq_s32(vacc0123, vright_pre_shift);
      vacc4567 = vqshlq_s32(vacc4567, vright_pre_shift);
      vacc89AB = vqshlq_s32(vacc89AB, vright_pre_shift);
      vaccCDEF = vqshlq_s32(vaccCDEF, vright_pre_shift);
      vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
      vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
      vacc89AB = vqdmulhq_s32(vacc89AB, vmultiplier);
      vaccCDEF = vqdmulhq_s32(vaccCDEF, vmultiplier);
      vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
      vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);
      vacc89AB = vrshlq_s32(vacc89AB, vright_post_shift);
      vaccCDEF = vrshlq_s32(vaccCDEF, vright_post_shift);
      // Narrow to int16, add output zero point, narrow to int8 with saturation.
#if XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
      int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
      vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
      vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
      int8x16_t vout0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc01234567), vacc89ABCDEF);
#else  // !XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
      int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
      vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
      vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
      int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
#endif  // !XNN_ARCH_ARM64
      // Clamp to [output_min, output_max] and store 16 channels.
      vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
      vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
      vst1q_s8(output, vout0123456789ABCDEF); output += 16;
    }
    // Remainder: 1-15 channels left. Biases still sit at w; per-tap kernel
    // bytes are read through k at fixed offsets into the last 16-wide group.
    if XNN_UNLIKELY(c != 0) {
      const int8_t* k = (const int8_t*) ((const int32_t*) w + 16);
      do {
        int32x4_t vacc0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
        int32x4_t vacc4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
        const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
        const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(k)); k += 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
        const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
        const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8((const void*) (k + 8)));
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
        const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
        const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8((const void*) (k + 24)));
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
        const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
        const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8((const void*) (k + 40)));
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
        const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
        const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8((const void*) (k + 56)));
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
        const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5)); i5 += 8;
        const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8((const void*) (k + 72)));
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
        const int16x8_t vi6x01234567 = vmovl_s8(vld1_s8(i6)); i6 += 8;
        const int16x8_t vk6x01234567 = vmovl_s8(vld1_s8((const void*) (k + 88)));
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
        const int16x8_t vi7x01234567 = vmovl_s8(vld1_s8(i7)); i7 += 8;
        const int16x8_t vk7x01234567 = vmovl_s8(vld1_s8((const void*) (k + 104)));
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));
        const int16x8_t vi8x01234567 = vmovl_s8(vld1_s8(i8)); i8 += 8;
        const int16x8_t vk8x01234567 = vmovl_s8(vld1_s8((const void*) (k + 120)));
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi8x01234567), vget_low_s16(vk8x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi8x01234567), vget_high_s16(vk8x01234567));

        // Same rndnu requantization as the main loop, but only 8 lanes wide.
        vacc0123 = vqshlq_s32(vacc0123, vright_pre_shift);
        vacc4567 = vqshlq_s32(vacc4567, vright_pre_shift);
        vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
        vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
        vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
        vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);
#if XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
        int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
        vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
        int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
        vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
        vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));
        // Store a full 8, or spill 4/2/1 lanes for the final sub-8 tail.
        if XNN_LIKELY(c >= 8) {
          vst1_s8(output, vout01234567); output += 8;
          c -= 8;
        } else {
          if (c & 4) {
            vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
            vout01234567 = vext_s8(vout01234567, vout01234567, 4);
          }
          if (c & 2) {
            vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
            vout01234567 = vext_s8(vout01234567, vout01234567, 2);
          }
          if (c & 1) {
            vst1_lane_s8(output, vout01234567, 0); output += 1;
          }
          c = 0;
        }
      } while (c != 0);
    }
    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 16,559
| 50.912226
| 103
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-9p16c-minmax-rndnu-neon-mul8-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-neon-mul8.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
// QS8 depthwise-convolution microkernel: 9-tap unipass ("9p"), 16 channels
// per main-loop iteration ("16c"), rndnu requantization, NEON "mul8_ld128"
// variant: 128-bit loads with direct int8 x int8 -> int16 multiplies
// (vmull_s8), widened into int32 accumulators via vaddw_s16.
//
// Fix applied: the extraction had corrupted every `&params` into the
// mis-encoded sequence `¶ms` (HTML-entity damage); restored to valid C.
//
// channels         - number of channels per output pixel (> 0).
// output_width     - number of output pixels to produce (> 0).
// input            - per-pixel array of 9 input-row pointers.
// weights          - packed layout: [16 x int32 bias][9 taps x 16 x int8],
//                    repeated per 16-channel group (remainder uses 8-wide
//                    reads with fixed per-tap offsets into the last group).
// output           - destination for quantized int8 results.
// input_stride     - byte stride between consecutive pointer groups in input.
// output_increment - bytes added to output after each output pixel.
// input_offset     - byte offset applied to every input pointer except `zero`.
// zero             - padding row; pointers equal to it are not offset.
// params           - rndnu requantization constants (pre/post shifts,
//                    multiplier, output zero point, clamping bounds).
void xnn_qs8_dwconv_minmax_rndnu_ukernel_9p16c__neon_mul8_ld128(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);
  // Broadcast all requantization constants once, outside the pixel loop.
  const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
  const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
  const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
  const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
  const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
  do {
    // Set up the 9 tap row pointers; rows that point at `zero` (padding)
    // are deliberately NOT offset by input_offset.
    const int8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
    }
    const int8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
    }
    const int8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
    }
    const int8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
    }
    const int8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
    }
    const int8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
    }
    const int8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
    }
    const int8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
    }
    const int8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
    }
    input = (const int8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
    // Main loop: 16 channels per iteration. Load int32 biases, then for each
    // of the 9 taps do a 128-bit input/kernel load, 8x8->16 multiply of the
    // low and high halves, and widening accumulation into int32 lanes.
    for (; c >= 16; c -= 16) {
      int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
      int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
      int32x4_t vacc89AB = vld1q_s32(w); w = (const int32_t*) w + 4;
      int32x4_t vaccCDEF = vld1q_s32(w); w = (const int32_t*) w + 4;

      // Tap 0.
      const int8x16_t vi0x0123456789ABCDEF = vld1q_s8(i0); i0 += 16;
      const int8x16_t vk0x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
      int16x8_t vprod01234567 = vmull_s8(vget_low_s8(vi0x0123456789ABCDEF), vget_low_s8(vk0x0123456789ABCDEF));
      int16x8_t vprod89ABCDEF = vmull_s8(vget_high_s8(vi0x0123456789ABCDEF), vget_high_s8(vk0x0123456789ABCDEF));
      vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
      vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
      vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
      vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
      // Taps 1-8 follow the identical pattern.
      const int8x16_t vi1x0123456789ABCDEF = vld1q_s8(i1); i1 += 16;
      const int8x16_t vk1x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
      vprod01234567 = vmull_s8(vget_low_s8(vi1x0123456789ABCDEF), vget_low_s8(vk1x0123456789ABCDEF));
      vprod89ABCDEF = vmull_s8(vget_high_s8(vi1x0123456789ABCDEF), vget_high_s8(vk1x0123456789ABCDEF));
      vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
      vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
      vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
      vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
      const int8x16_t vi2x0123456789ABCDEF = vld1q_s8(i2); i2 += 16;
      const int8x16_t vk2x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
      vprod01234567 = vmull_s8(vget_low_s8(vi2x0123456789ABCDEF), vget_low_s8(vk2x0123456789ABCDEF));
      vprod89ABCDEF = vmull_s8(vget_high_s8(vi2x0123456789ABCDEF), vget_high_s8(vk2x0123456789ABCDEF));
      vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
      vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
      vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
      vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
      const int8x16_t vi3x0123456789ABCDEF = vld1q_s8(i3); i3 += 16;
      const int8x16_t vk3x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
      vprod01234567 = vmull_s8(vget_low_s8(vi3x0123456789ABCDEF), vget_low_s8(vk3x0123456789ABCDEF));
      vprod89ABCDEF = vmull_s8(vget_high_s8(vi3x0123456789ABCDEF), vget_high_s8(vk3x0123456789ABCDEF));
      vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
      vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
      vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
      vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
      const int8x16_t vi4x0123456789ABCDEF = vld1q_s8(i4); i4 += 16;
      const int8x16_t vk4x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
      vprod01234567 = vmull_s8(vget_low_s8(vi4x0123456789ABCDEF), vget_low_s8(vk4x0123456789ABCDEF));
      vprod89ABCDEF = vmull_s8(vget_high_s8(vi4x0123456789ABCDEF), vget_high_s8(vk4x0123456789ABCDEF));
      vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
      vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
      vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
      vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
      const int8x16_t vi5x0123456789ABCDEF = vld1q_s8(i5); i5 += 16;
      const int8x16_t vk5x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
      vprod01234567 = vmull_s8(vget_low_s8(vi5x0123456789ABCDEF), vget_low_s8(vk5x0123456789ABCDEF));
      vprod89ABCDEF = vmull_s8(vget_high_s8(vi5x0123456789ABCDEF), vget_high_s8(vk5x0123456789ABCDEF));
      vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
      vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
      vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
      vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
      const int8x16_t vi6x0123456789ABCDEF = vld1q_s8(i6); i6 += 16;
      const int8x16_t vk6x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
      vprod01234567 = vmull_s8(vget_low_s8(vi6x0123456789ABCDEF), vget_low_s8(vk6x0123456789ABCDEF));
      vprod89ABCDEF = vmull_s8(vget_high_s8(vi6x0123456789ABCDEF), vget_high_s8(vk6x0123456789ABCDEF));
      vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
      vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
      vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
      vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
      const int8x16_t vi7x0123456789ABCDEF = vld1q_s8(i7); i7 += 16;
      const int8x16_t vk7x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
      vprod01234567 = vmull_s8(vget_low_s8(vi7x0123456789ABCDEF), vget_low_s8(vk7x0123456789ABCDEF));
      vprod89ABCDEF = vmull_s8(vget_high_s8(vi7x0123456789ABCDEF), vget_high_s8(vk7x0123456789ABCDEF));
      vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
      vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
      vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
      vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
      const int8x16_t vi8x0123456789ABCDEF = vld1q_s8(i8); i8 += 16;
      const int8x16_t vk8x0123456789ABCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
      vprod01234567 = vmull_s8(vget_low_s8(vi8x0123456789ABCDEF), vget_low_s8(vk8x0123456789ABCDEF));
      vprod89ABCDEF = vmull_s8(vget_high_s8(vi8x0123456789ABCDEF), vget_high_s8(vk8x0123456789ABCDEF));
      vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
      vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
      vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
      vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));

      // rndnu requantization: saturating pre-shift, doubling-high multiply,
      // rounding post-shift.
      vacc0123 = vqshlq_s32(vacc0123, vright_pre_shift);
      vacc4567 = vqshlq_s32(vacc4567, vright_pre_shift);
      vacc89AB = vqshlq_s32(vacc89AB, vright_pre_shift);
      vaccCDEF = vqshlq_s32(vaccCDEF, vright_pre_shift);
      vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
      vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
      vacc89AB = vqdmulhq_s32(vacc89AB, vmultiplier);
      vaccCDEF = vqdmulhq_s32(vaccCDEF, vmultiplier);
      vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
      vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);
      vacc89AB = vrshlq_s32(vacc89AB, vright_post_shift);
      vaccCDEF = vrshlq_s32(vaccCDEF, vright_post_shift);
      // Narrow to int16, add output zero point, narrow to int8 with saturation.
#if XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
      int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
      vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
      vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
      int8x16_t vout0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc01234567), vacc89ABCDEF);
#else  // !XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
      int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
      vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
      vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
      int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
#endif  // !XNN_ARCH_ARM64
      // Clamp to [output_min, output_max] and store 16 channels.
      vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
      vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
      vst1q_s8(output, vout0123456789ABCDEF); output += 16;
    }
    // Remainder: 1-15 channels left. Biases still sit at w; per-tap kernel
    // bytes are read through k at fixed offsets into the last 16-wide group.
    if XNN_UNLIKELY(c != 0) {
      const int8_t* k = (const int8_t*) ((const int32_t*) w + 16);
      do {
        int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
        int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
        const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
        const int8x8_t vk0x01234567 = vld1_s8(k); k += 8;
        int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
        const int8x8_t vk1x01234567 = vld1_s8((const void*) (k + 8));
        vprod01234567 = vmull_s8(vi1x01234567, vk1x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
        const int8x8_t vk2x01234567 = vld1_s8((const void*) (k + 24));
        vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
        const int8x8_t vk3x01234567 = vld1_s8((const void*) (k + 40));
        vprod01234567 = vmull_s8(vi3x01234567, vk3x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
        const int8x8_t vk4x01234567 = vld1_s8((const void*) (k + 56));
        vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
        const int8x8_t vk5x01234567 = vld1_s8((const void*) (k + 72));
        vprod01234567 = vmull_s8(vi5x01234567, vk5x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
        const int8x8_t vk6x01234567 = vld1_s8((const void*) (k + 88));
        vprod01234567 = vmull_s8(vi6x01234567, vk6x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi7x01234567 = vld1_s8(i7); i7 += 8;
        const int8x8_t vk7x01234567 = vld1_s8((const void*) (k + 104));
        vprod01234567 = vmull_s8(vi7x01234567, vk7x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi8x01234567 = vld1_s8(i8); i8 += 8;
        const int8x8_t vk8x01234567 = vld1_s8((const void*) (k + 120));
        vprod01234567 = vmull_s8(vi8x01234567, vk8x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));

        // Same rndnu requantization as the main loop, but only 8 lanes wide.
        vacc0123 = vqshlq_s32(vacc0123, vright_pre_shift);
        vacc4567 = vqshlq_s32(vacc4567, vright_pre_shift);
        vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
        vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
        vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
        vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);
#if XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
        int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
        vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
        int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
        vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
        vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));
        // Store a full 8, or spill 4/2/1 lanes for the final sub-8 tail.
        if XNN_LIKELY(c >= 8) {
          vst1_s8(output, vout01234567); output += 8;
          c -= 8;
        } else {
          if (c & 4) {
            vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
            vout01234567 = vext_s8(vout01234567, vout01234567, 4);
          }
          if (c & 2) {
            vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
            vout01234567 = vext_s8(vout01234567, vout01234567, 2);
          }
          if (c & 1) {
            vst1_lane_s8(output, vout01234567, 0); output += 1;
          }
          c = 0;
        }
      } while (c != 0);
    }
    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 15,552
| 45.151335
| 113
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-9p16c-minmax-rndnu-neon-mul8-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-neon-mul8.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
// QS8 depthwise convolution microkernel: 9-tap unipass ("9p"), 16 channels per
// main-loop iteration ("16c"), rndnu requantization, NEON VMULL.S8 on 64-bit
// lane loads ("mul8_ld64").
//
// weights layout per 16-channel group: 16 int32 biases followed by the 9 taps'
// int8 weights (16 per tap). `input` holds 9 row pointers per output pixel;
// rows equal to `zero` point at a shared zero buffer and are not rebased.
//
// Fix vs. the previous revision: the six parameter loads below had mojibake
// ("¶ms->...") where "&params->..." was intended, which broke compilation.
void xnn_qs8_dwconv_minmax_rndnu_ukernel_9p16c__neon_mul8_ld64(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);

  // Broadcast requantization parameters (saturating pre-shift, doubling-high
  // multiply, rounding post-shift) and the output clamping bounds.
  const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
  const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
  const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
  const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
  const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
  do {
    // Resolve the 9 input row pointers for this output pixel, applying
    // input_offset to every row except the shared zero buffer.
    const int8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
    }
    const int8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
    }
    const int8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
    }
    const int8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
    }
    const int8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
    }
    const int8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
    }
    const int8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
    }
    const int8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
    }
    const int8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
    }
    input = (const int8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
    // Main loop: 16 channels per iteration. For each of the 9 taps, widen-
    // multiply input x weight into int16, then widen-accumulate into the four
    // int32 accumulators.
    for (; c >= 16; c -= 16) {
      int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
      int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
      int32x4_t vacc89AB = vld1q_s32(w); w = (const int32_t*) w + 4;
      int32x4_t vaccCDEF = vld1q_s32(w); w = (const int32_t*) w + 4;

      const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
      const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vi0x89ABCDEF = vld1_s8(i0); i0 += 8;
      const int8x8_t vk0x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
      int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
      int16x8_t vprod89ABCDEF = vmull_s8(vi0x89ABCDEF, vk0x89ABCDEF);
      vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
      vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
      vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
      vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
      const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
      const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vi1x89ABCDEF = vld1_s8(i1); i1 += 8;
      const int8x8_t vk1x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
      vprod01234567 = vmull_s8(vi1x01234567, vk1x01234567);
      vprod89ABCDEF = vmull_s8(vi1x89ABCDEF, vk1x89ABCDEF);
      vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
      vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
      vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
      vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
      const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
      const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vi2x89ABCDEF = vld1_s8(i2); i2 += 8;
      const int8x8_t vk2x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
      vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
      vprod89ABCDEF = vmull_s8(vi2x89ABCDEF, vk2x89ABCDEF);
      vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
      vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
      vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
      vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
      const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
      const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vi3x89ABCDEF = vld1_s8(i3); i3 += 8;
      const int8x8_t vk3x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
      vprod01234567 = vmull_s8(vi3x01234567, vk3x01234567);
      vprod89ABCDEF = vmull_s8(vi3x89ABCDEF, vk3x89ABCDEF);
      vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
      vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
      vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
      vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
      const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
      const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;
      const int8x8_t vk4x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
      vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
      vprod89ABCDEF = vmull_s8(vi4x89ABCDEF, vk4x89ABCDEF);
      vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
      vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
      vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
      vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
      const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
      const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vi5x89ABCDEF = vld1_s8(i5); i5 += 8;
      const int8x8_t vk5x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
      vprod01234567 = vmull_s8(vi5x01234567, vk5x01234567);
      vprod89ABCDEF = vmull_s8(vi5x89ABCDEF, vk5x89ABCDEF);
      vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
      vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
      vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
      vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
      const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
      const int8x8_t vk6x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vi6x89ABCDEF = vld1_s8(i6); i6 += 8;
      const int8x8_t vk6x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
      vprod01234567 = vmull_s8(vi6x01234567, vk6x01234567);
      vprod89ABCDEF = vmull_s8(vi6x89ABCDEF, vk6x89ABCDEF);
      vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
      vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
      vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
      vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
      const int8x8_t vi7x01234567 = vld1_s8(i7); i7 += 8;
      const int8x8_t vk7x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vi7x89ABCDEF = vld1_s8(i7); i7 += 8;
      const int8x8_t vk7x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
      vprod01234567 = vmull_s8(vi7x01234567, vk7x01234567);
      vprod89ABCDEF = vmull_s8(vi7x89ABCDEF, vk7x89ABCDEF);
      vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
      vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
      vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
      vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
      const int8x8_t vi8x01234567 = vld1_s8(i8); i8 += 8;
      const int8x8_t vk8x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vi8x89ABCDEF = vld1_s8(i8); i8 += 8;
      const int8x8_t vk8x89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
      vprod01234567 = vmull_s8(vi8x01234567, vk8x01234567);
      vprod89ABCDEF = vmull_s8(vi8x89ABCDEF, vk8x89ABCDEF);
      vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
      vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
      vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
      vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));

      // rndnu requantization: saturating pre-shift, doubling-high multiply,
      // rounding post-shift.
      vacc0123 = vqshlq_s32(vacc0123, vright_pre_shift);
      vacc4567 = vqshlq_s32(vacc4567, vright_pre_shift);
      vacc89AB = vqshlq_s32(vacc89AB, vright_pre_shift);
      vaccCDEF = vqshlq_s32(vaccCDEF, vright_pre_shift);
      vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
      vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
      vacc89AB = vqdmulhq_s32(vacc89AB, vmultiplier);
      vaccCDEF = vqdmulhq_s32(vaccCDEF, vmultiplier);
      vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
      vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);
      vacc89AB = vrshlq_s32(vacc89AB, vright_post_shift);
      vaccCDEF = vrshlq_s32(vaccCDEF, vright_post_shift);

      // Narrow to int16 (adding the output zero point with saturation), then
      // to int8, clamp, and store 16 outputs.
#if XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
      int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);

      vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
      vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);

      int8x16_t vout0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc01234567), vacc89ABCDEF);
#else  // !XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
      int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));

      vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
      vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);

      int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
#endif  // !XNN_ARCH_ARM64

      vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
      vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);

      vst1q_s8(output, vout0123456789ABCDEF); output += 16;
    }
    // Remainder: up to 15 channels, processed 8 at a time. `k` addresses the
    // tap weights with fixed offsets matching the 16-channel group layout.
    if XNN_UNLIKELY(c != 0) {
      const int8_t* k = (const int8_t*) ((const int32_t*) w + 16);
      do {
        int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
        int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;

        const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
        const int8x8_t vk0x01234567 = vld1_s8(k); k += 8;
        int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
        const int8x8_t vk1x01234567 = vld1_s8((const void*) (k + 8));
        vprod01234567 = vmull_s8(vi1x01234567, vk1x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
        const int8x8_t vk2x01234567 = vld1_s8((const void*) (k + 24));
        vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
        const int8x8_t vk3x01234567 = vld1_s8((const void*) (k + 40));
        vprod01234567 = vmull_s8(vi3x01234567, vk3x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
        const int8x8_t vk4x01234567 = vld1_s8((const void*) (k + 56));
        vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
        const int8x8_t vk5x01234567 = vld1_s8((const void*) (k + 72));
        vprod01234567 = vmull_s8(vi5x01234567, vk5x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
        const int8x8_t vk6x01234567 = vld1_s8((const void*) (k + 88));
        vprod01234567 = vmull_s8(vi6x01234567, vk6x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi7x01234567 = vld1_s8(i7); i7 += 8;
        const int8x8_t vk7x01234567 = vld1_s8((const void*) (k + 104));
        vprod01234567 = vmull_s8(vi7x01234567, vk7x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi8x01234567 = vld1_s8(i8); i8 += 8;
        const int8x8_t vk8x01234567 = vld1_s8((const void*) (k + 120));
        vprod01234567 = vmull_s8(vi8x01234567, vk8x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));

        vacc0123 = vqshlq_s32(vacc0123, vright_pre_shift);
        vacc4567 = vqshlq_s32(vacc4567, vright_pre_shift);
        vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
        vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
        vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
        vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);

#if XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
        int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
        vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);

        int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
        vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
        vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));

        if XNN_LIKELY(c >= 8) {
          vst1_s8(output, vout01234567); output += 8;
          c -= 8;
        } else {
          // Store the final 1-7 outputs with progressively narrower stores.
          if (c & 4) {
            vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
            vout01234567 = vext_s8(vout01234567, vout01234567, 4);
          }
          if (c & 2) {
            vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
            vout01234567 = vext_s8(vout01234567, vout01234567, 2);
          }
          if (c & 1) {
            vst1_lane_s8(output, vout01234567, 0); output += 1;
          }
          c = 0;
        }
      } while (c != 0);
    }

    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 15,776
| 43.442254
| 103
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-9p1c-minmax-fp32-scalar-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QS8 depthwise convolution microkernel: 9-tap unipass, 1 channel per
// iteration, scalar implementation with fp32 requantization via the
// "fmagic" (magic-bias float-to-int) rounding trick.
//
// weights layout per channel: one int32 bias followed by 9 int8 tap weights.
void xnn_qs8_dwconv_minmax_fp32_ukernel_9p1c__scalar_fmagic(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);

  const float vscale = params->fp32_scalar_fmagic.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
  const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
  const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
  do {
    // Resolve the 9 input row pointers for this output pixel; rows equal to
    // the shared zero buffer are not rebased by input_offset.
    const int8_t* i[9];
    for (size_t t = 0; t < 9; t++) {
      const int8_t* row = input[t];
      assert(row != NULL);
      if XNN_UNPREDICTABLE(row != zero) {
        row = (const int8_t*) ((uintptr_t) row + input_offset);
      }
      i[t] = row;
    }
    input = (const int8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
    do {
      // Bias, then accumulate the 9 taps for this channel.
      int32_t vacc = unaligned_load_s32(w);
      const int8_t* vk = (const int8_t*) ((uintptr_t) w + sizeof(int32_t));
      for (size_t t = 0; t < 9; t++) {
        vacc += (int32_t) *i[t]++ * (int32_t) vk[t];
      }
      w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 9 * sizeof(int8_t));

      // fp32 requantization: scale, clamp, then convert to int via magic bias.
      float vfpacc = (float) vacc * vscale;
      vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
      vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
      vfpacc += vmagic_bias;
      int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;

      *output++ = (int8_t) vout;
    } while (--c != 0);

    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 4,777
| 34.924812
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-9p1c-minmax-fp32-scalar-imagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QS8 depthwise convolution microkernel: 9-tap unipass, 1 channel per
// iteration, scalar implementation with fp32 requantization via the
// "imagic" variant (magic-bias conversion followed by integer clamping).
//
// weights layout per channel: one int32 bias followed by 9 int8 tap weights.
void xnn_qs8_dwconv_minmax_fp32_ukernel_9p1c__scalar_imagic(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);

  const float vscale = params->fp32_scalar_imagic.scale;
  const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
  const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
  const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
  const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
  do {
    // Resolve the 9 input row pointers for this output pixel; rows equal to
    // the shared zero buffer are not rebased by input_offset.
    const int8_t* i[9];
    for (size_t t = 0; t < 9; t++) {
      const int8_t* row = input[t];
      assert(row != NULL);
      if XNN_UNPREDICTABLE(row != zero) {
        row = (const int8_t*) ((uintptr_t) row + input_offset);
      }
      i[t] = row;
    }
    input = (const int8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
    do {
      // Bias, then accumulate the 9 taps for this channel.
      int32_t vacc = unaligned_load_s32(w);
      const int8_t* vk = (const int8_t*) ((uintptr_t) w + sizeof(int32_t));
      for (size_t t = 0; t < 9; t++) {
        vacc += (int32_t) *i[t]++ * (int32_t) vk[t];
      }
      w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 9 * sizeof(int8_t));

      // fp32 requantization: scale, magic-bias convert, clamp in the integer
      // domain, then subtract the bias/zero-point offset.
      float vfpacc = (float) vacc * vscale;
      vfpacc += vmagic_bias;
      int32_t vout = (int32_t) float_as_uint32(vfpacc);
      vout = math_max_s32(vout, vmagic_min);
      vout = math_min_s32(vout, vmagic_max);
      vout -= vmagic_bias_less_zero_point;

      *output++ = (int8_t) vout;
    } while (--c != 0);

    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 4,663
| 33.80597
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-9p1c-minmax-fp32-scalar-lrintf.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QS8 depthwise convolution microkernel: 9-tap unipass, 1 channel per
// iteration, scalar implementation with fp32 requantization rounded via
// lrintf().
//
// weights layout per channel: one int32 bias followed by 9 int8 tap weights.
void xnn_qs8_dwconv_minmax_fp32_ukernel_9p1c__scalar_lrintf(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);

  const float vscale = params->fp32_scalar_lrintf.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
  const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
  do {
    // Resolve the 9 input row pointers for this output pixel; rows equal to
    // the shared zero buffer are not rebased by input_offset.
    const int8_t* i[9];
    for (size_t t = 0; t < 9; t++) {
      const int8_t* row = input[t];
      assert(row != NULL);
      if XNN_UNPREDICTABLE(row != zero) {
        row = (const int8_t*) ((uintptr_t) row + input_offset);
      }
      i[t] = row;
    }
    input = (const int8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
    do {
      // Bias, then accumulate the 9 taps for this channel.
      int32_t vacc = unaligned_load_s32(w);
      const int8_t* vk = (const int8_t*) ((uintptr_t) w + sizeof(int32_t));
      for (size_t t = 0; t < 9; t++) {
        vacc += (int32_t) *i[t]++ * (int32_t) vk[t];
      }
      w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 9 * sizeof(int8_t));

      // fp32 requantization: scale, clamp, round-to-nearest via lrintf, then
      // re-add the output zero point.
      float vfpacc = (float) vacc * vscale;
      vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
      vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
      const int32_t vrndacc = (int32_t) lrintf(vfpacc);
      int32_t vout = vrndacc + voutput_zero_point;

      *output++ = (int8_t) vout;
    } while (--c != 0);

    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 4,681
| 34.203008
| 98
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-9p1c-minmax-fp32-wasm-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QS8 depthwise convolution microkernel: 9-tap unipass, 1 channel per
// iteration, scalar implementation targeting WebAssembly. Uses the fp32
// "fmagic" (magic-bias) requantization with WASM min/max builtins.
//
// weights layout per channel: one int32 bias followed by 9 int8 tap weights.
void xnn_qs8_dwconv_minmax_fp32_ukernel_9p1c__wasm_fmagic(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);

  const float vscale = params->fp32_scalar_fmagic.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
  const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
  const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
  do {
    // Resolve the 9 input row pointers for this output pixel; rows equal to
    // the shared zero buffer are not rebased by input_offset.
    const int8_t* i[9];
    for (size_t t = 0; t < 9; t++) {
      const int8_t* row = input[t];
      assert(row != NULL);
      if XNN_UNPREDICTABLE(row != zero) {
        row = (const int8_t*) ((uintptr_t) row + input_offset);
      }
      i[t] = row;
    }
    input = (const int8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
    do {
      // Bias, then accumulate the 9 taps for this channel.
      int32_t vacc = unaligned_load_s32(w);
      const int8_t* vk = (const int8_t*) ((uintptr_t) w + sizeof(int32_t));
      for (size_t t = 0; t < 9; t++) {
        vacc += (int32_t) *i[t]++ * (int32_t) vk[t];
      }
      w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 9 * sizeof(int8_t));

      // fp32 requantization: scale, clamp with WASM f32.min/f32.max builtins,
      // then convert to int via the magic-bias trick.
      float vfpacc = (float) vacc * vscale;
      vfpacc = __builtin_wasm_max_f32(vfpacc, voutput_min_less_zero_point);
      vfpacc = __builtin_wasm_min_f32(vfpacc, voutput_max_less_zero_point);
      vfpacc += vmagic_bias;
      int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;

      *output++ = (int8_t) vout;
    } while (--c != 0);

    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 4,795
| 35.06015
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-9p1c-minmax-rndnu-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QS8 depthwise-convolution microkernel (scalar, rndnu requantization):
// unipass over 9 kernel points ("9p"), 1 channel per inner iteration ("1c").
// Per-channel weight layout: one int32 bias followed by the 9 int8 kernel taps.
void xnn_qs8_dwconv_minmax_rndnu_ukernel_9p1c__scalar(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);
  // rndnu requantization constants: the 64-bit rounding term is pre-computed
  // so rounding reduces to an add followed by an arithmetic shift right.
  const int32_t vmultiplier = params->rndnu_scalar.multiplier;
  const int64_t vrounding = params->rndnu_scalar.rounding;
  const uint32_t vshift = params->rndnu_scalar.shift;
  // Clamp bounds are stored with the output zero point already subtracted, so
  // clamping happens before the zero point is re-added.
  const int32_t voutput_min_less_zero_point = params->rndnu_scalar.output_min_less_zero_point;
  const int32_t voutput_max_less_zero_point = params->rndnu_scalar.output_max_less_zero_point;
  const int32_t voutput_zero_point = params->rndnu_scalar.output_zero_point;
  do {
    // Set up the 9 per-tap input row pointers. A pointer equal to `zero` is
    // the shared zero/padding row and must NOT be shifted by input_offset.
    const int8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
    }
    const int8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
    }
    const int8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
    }
    const int8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
    }
    const int8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
    }
    const int8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
    }
    const int8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
    }
    const int8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
    }
    const int8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
    }
    // Advance to the next output pixel's row-pointer set.
    input = (const int8_t**) ((uintptr_t) input + input_stride);
    size_t c = channels;
    const void* w = weights;
    do {
      // Accumulator starts from the (possibly unaligned) int32 bias.
      int32_t vacc = unaligned_load_s32(w);
      const int32_t vi0 = (int32_t) *i0++;
      const int32_t vk0 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[0];
      vacc += vi0 * vk0;
      const int32_t vi1 = (int32_t) *i1++;
      const int32_t vk1 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[1];
      vacc += vi1 * vk1;
      const int32_t vi2 = (int32_t) *i2++;
      const int32_t vk2 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[2];
      vacc += vi2 * vk2;
      const int32_t vi3 = (int32_t) *i3++;
      const int32_t vk3 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[3];
      vacc += vi3 * vk3;
      const int32_t vi4 = (int32_t) *i4++;
      const int32_t vk4 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[4];
      vacc += vi4 * vk4;
      const int32_t vi5 = (int32_t) *i5++;
      const int32_t vk5 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[5];
      vacc += vi5 * vk5;
      const int32_t vi6 = (int32_t) *i6++;
      const int32_t vk6 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[6];
      vacc += vi6 * vk6;
      const int32_t vi7 = (int32_t) *i7++;
      const int32_t vk7 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[7];
      vacc += vi7 * vk7;
      const int32_t vi8 = (int32_t) *i8++;
      const int32_t vk8 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[8];
      vacc += vi8 * vk8;
      // Advance to the next channel's weights: int32 bias + 9 int8 taps.
      w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 9 * sizeof(int8_t));
      // Requantize: widening 32x32->64 multiply, add pre-baked rounding term,
      // then arithmetic shift right.
      const int64_t vextacc = math_mulext_s32(vacc, vmultiplier) + vrounding;
      int32_t vout = (int32_t) math_asr_s64(vextacc, vshift);
      // Clamp in the zero-point-free domain, then re-add the output zero point.
      vout = math_max_s32(vout, voutput_min_less_zero_point);
      vout = math_min_s32(vout, voutput_max_less_zero_point);
      vout += voutput_zero_point;
      *output++ = (int8_t) vout;
    } while (--c != 0);
    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 4,776
| 34.917293
| 94
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-9p2c-minmax-fp32-scalar-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QS8 depthwise-convolution microkernel (scalar, fp32 requantization with the
// "fmagic" float->int trick): unipass over 9 kernel points ("9p"), 2 channels
// per main-loop iteration ("2c") with a 1-channel remainder.
// Weight layout per channel pair: 2 int32 biases, then the 18 int8 taps
// interleaved by channel (k0c0, k0c1, k1c0, k1c1, ...).
void xnn_qs8_dwconv_minmax_fp32_ukernel_9p2c__scalar_fmagic(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);
  const float vscale = params->fp32_scalar_fmagic.scale;
  // Clamp bounds carry the output zero point pre-subtracted; the magic-bias
  // subtraction below re-introduces the zero point.
  const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
  const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
  const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
  do {
    // Set up the 9 per-tap input row pointers. A pointer equal to `zero` is
    // the shared zero/padding row and must NOT be shifted by input_offset.
    const int8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
    }
    const int8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
    }
    const int8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
    }
    const int8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
    }
    const int8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
    }
    const int8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
    }
    const int8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
    }
    const int8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
    }
    const int8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
    }
    input = (const int8_t**) ((uintptr_t) input + input_stride);
    size_t c = channels;
    const void* w = weights;
    // Main loop: two channels per iteration.
    for (; c >= 2; c -= 2) {
      int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
      int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
      const int32_t vi0x0 = (int32_t) i0[0];
      const int32_t vi0x1 = (int32_t) i0[1];
      i0 += 2;
      const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
      const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1];
      vacc0 += vi0x0 * vk0x0;
      vacc1 += vi0x1 * vk0x1;
      const int32_t vi1x0 = (int32_t) i1[0];
      const int32_t vi1x1 = (int32_t) i1[1];
      i1 += 2;
      const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
      const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3];
      vacc0 += vi1x0 * vk1x0;
      vacc1 += vi1x1 * vk1x1;
      const int32_t vi2x0 = (int32_t) i2[0];
      const int32_t vi2x1 = (int32_t) i2[1];
      i2 += 2;
      const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
      const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5];
      vacc0 += vi2x0 * vk2x0;
      vacc1 += vi2x1 * vk2x1;
      const int32_t vi3x0 = (int32_t) i3[0];
      const int32_t vi3x1 = (int32_t) i3[1];
      i3 += 2;
      const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6];
      const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7];
      vacc0 += vi3x0 * vk3x0;
      vacc1 += vi3x1 * vk3x1;
      const int32_t vi4x0 = (int32_t) i4[0];
      const int32_t vi4x1 = (int32_t) i4[1];
      i4 += 2;
      const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8];
      const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9];
      vacc0 += vi4x0 * vk4x0;
      vacc1 += vi4x1 * vk4x1;
      const int32_t vi5x0 = (int32_t) i5[0];
      const int32_t vi5x1 = (int32_t) i5[1];
      i5 += 2;
      const int32_t vk5x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10];
      const int32_t vk5x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[11];
      vacc0 += vi5x0 * vk5x0;
      vacc1 += vi5x1 * vk5x1;
      const int32_t vi6x0 = (int32_t) i6[0];
      const int32_t vi6x1 = (int32_t) i6[1];
      i6 += 2;
      const int32_t vk6x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12];
      const int32_t vk6x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[13];
      vacc0 += vi6x0 * vk6x0;
      vacc1 += vi6x1 * vk6x1;
      const int32_t vi7x0 = (int32_t) i7[0];
      const int32_t vi7x1 = (int32_t) i7[1];
      i7 += 2;
      const int32_t vk7x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14];
      const int32_t vk7x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[15];
      vacc0 += vi7x0 * vk7x0;
      vacc1 += vi7x1 * vk7x1;
      const int32_t vi8x0 = (int32_t) i8[0];
      const int32_t vi8x1 = (int32_t) i8[1];
      i8 += 2;
      const int32_t vk8x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[16];
      const int32_t vk8x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[17];
      vacc0 += vi8x0 * vk8x0;
      vacc1 += vi8x1 * vk8x1;
      // Advance to the next channel pair's weights: 2 biases + 18 taps.
      w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 18 * sizeof(int8_t));
      // Requantize in fp32: scale, clamp, then the magic-bias trick.
      float vfpacc0 = (float) vacc0;
      float vfpacc1 = (float) vacc1;
      vfpacc0 *= vscale;
      vfpacc1 *= vscale;
      vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
      vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
      vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
      vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
      // Adding the magic bias places the rounded value in the low mantissa
      // bits; reinterpreting the bits and subtracting
      // magic_bias_less_output_zero_point yields the zero-point-adjusted int.
      vfpacc0 += vmagic_bias;
      vfpacc1 += vmagic_bias;
      int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
      int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
      output[0] = (int8_t) vout0;
      output[1] = (int8_t) vout1;
      output += 2;
    }
    // Remainder: a single leftover channel (odd channel count). The kernel
    // taps live at every other index of the interleaved layout; pointers and
    // `w` are not advanced, as this is the last channel of the row.
    if XNN_UNLIKELY(c != 0) {
      int32_t vacc = unaligned_load_s32(w);
      const int32_t vi0 = (int32_t) *i0;
      const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
      vacc += vi0 * vk0;
      const int32_t vi1 = (int32_t) *i1;
      const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
      vacc += vi1 * vk1;
      const int32_t vi2 = (int32_t) *i2;
      const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
      vacc += vi2 * vk2;
      const int32_t vi3 = (int32_t) *i3;
      const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6];
      vacc += vi3 * vk3;
      const int32_t vi4 = (int32_t) *i4;
      const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8];
      vacc += vi4 * vk4;
      const int32_t vi5 = (int32_t) *i5;
      const int32_t vk5 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10];
      vacc += vi5 * vk5;
      const int32_t vi6 = (int32_t) *i6;
      const int32_t vk6 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12];
      vacc += vi6 * vk6;
      const int32_t vi7 = (int32_t) *i7;
      const int32_t vk7 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14];
      vacc += vi7 * vk7;
      const int32_t vi8 = (int32_t) *i8;
      const int32_t vk8 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[16];
      vacc += vi8 * vk8;
      float vfpacc = (float) vacc * vscale;
      vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
      vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
      vfpacc += vmagic_bias;
      int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
      *output++ = (int8_t) vout;
    }
    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 9,069
| 35.28
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-9p2c-minmax-fp32-scalar-imagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QS8 depthwise-convolution microkernel (scalar, fp32 requantization with the
// "imagic" variant): unipass over 9 kernel points ("9p"), 2 channels per
// main-loop iteration ("2c") with a 1-channel remainder. Unlike "fmagic",
// output clamping is done in the INTEGER domain (magic_min/magic_max) after
// the magic-bias float->int conversion.
// Weight layout per channel pair: 2 int32 biases, then the 18 int8 taps
// interleaved by channel (k0c0, k0c1, k1c0, k1c1, ...).
void xnn_qs8_dwconv_minmax_fp32_ukernel_9p2c__scalar_imagic(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);
  const float vscale = params->fp32_scalar_imagic.scale;
  const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
  // Integer-domain clamp bounds and zero-point correction, expressed in the
  // magic-biased representation.
  const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
  const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
  const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
  do {
    // Set up the 9 per-tap input row pointers. A pointer equal to `zero` is
    // the shared zero/padding row and must NOT be shifted by input_offset.
    const int8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
    }
    const int8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
    }
    const int8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
    }
    const int8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
    }
    const int8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
    }
    const int8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
    }
    const int8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
    }
    const int8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
    }
    const int8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
    }
    input = (const int8_t**) ((uintptr_t) input + input_stride);
    size_t c = channels;
    const void* w = weights;
    // Main loop: two channels per iteration.
    for (; c >= 2; c -= 2) {
      int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
      int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
      const int32_t vi0x0 = (int32_t) i0[0];
      const int32_t vi0x1 = (int32_t) i0[1];
      i0 += 2;
      const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
      const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1];
      vacc0 += vi0x0 * vk0x0;
      vacc1 += vi0x1 * vk0x1;
      const int32_t vi1x0 = (int32_t) i1[0];
      const int32_t vi1x1 = (int32_t) i1[1];
      i1 += 2;
      const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
      const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3];
      vacc0 += vi1x0 * vk1x0;
      vacc1 += vi1x1 * vk1x1;
      const int32_t vi2x0 = (int32_t) i2[0];
      const int32_t vi2x1 = (int32_t) i2[1];
      i2 += 2;
      const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
      const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5];
      vacc0 += vi2x0 * vk2x0;
      vacc1 += vi2x1 * vk2x1;
      const int32_t vi3x0 = (int32_t) i3[0];
      const int32_t vi3x1 = (int32_t) i3[1];
      i3 += 2;
      const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6];
      const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7];
      vacc0 += vi3x0 * vk3x0;
      vacc1 += vi3x1 * vk3x1;
      const int32_t vi4x0 = (int32_t) i4[0];
      const int32_t vi4x1 = (int32_t) i4[1];
      i4 += 2;
      const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8];
      const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9];
      vacc0 += vi4x0 * vk4x0;
      vacc1 += vi4x1 * vk4x1;
      const int32_t vi5x0 = (int32_t) i5[0];
      const int32_t vi5x1 = (int32_t) i5[1];
      i5 += 2;
      const int32_t vk5x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10];
      const int32_t vk5x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[11];
      vacc0 += vi5x0 * vk5x0;
      vacc1 += vi5x1 * vk5x1;
      const int32_t vi6x0 = (int32_t) i6[0];
      const int32_t vi6x1 = (int32_t) i6[1];
      i6 += 2;
      const int32_t vk6x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12];
      const int32_t vk6x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[13];
      vacc0 += vi6x0 * vk6x0;
      vacc1 += vi6x1 * vk6x1;
      const int32_t vi7x0 = (int32_t) i7[0];
      const int32_t vi7x1 = (int32_t) i7[1];
      i7 += 2;
      const int32_t vk7x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14];
      const int32_t vk7x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[15];
      vacc0 += vi7x0 * vk7x0;
      vacc1 += vi7x1 * vk7x1;
      const int32_t vi8x0 = (int32_t) i8[0];
      const int32_t vi8x1 = (int32_t) i8[1];
      i8 += 2;
      const int32_t vk8x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[16];
      const int32_t vk8x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[17];
      vacc0 += vi8x0 * vk8x0;
      vacc1 += vi8x1 * vk8x1;
      // Advance to the next channel pair's weights: 2 biases + 18 taps.
      w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 18 * sizeof(int8_t));
      // Requantize: scale in fp32, add magic bias, reinterpret bits to get the
      // rounded value, clamp in the magic-biased integer domain, then subtract
      // magic_bias_less_zero_point to land on the zero-point-adjusted output.
      float vfpacc0 = (float) vacc0;
      float vfpacc1 = (float) vacc1;
      vfpacc0 *= vscale;
      vfpacc1 *= vscale;
      vfpacc0 += vmagic_bias;
      vfpacc1 += vmagic_bias;
      int32_t vout0 = (int32_t) float_as_uint32(vfpacc0);
      int32_t vout1 = (int32_t) float_as_uint32(vfpacc1);
      vout0 = math_max_s32(vout0, vmagic_min);
      vout1 = math_max_s32(vout1, vmagic_min);
      vout0 = math_min_s32(vout0, vmagic_max);
      vout1 = math_min_s32(vout1, vmagic_max);
      vout0 -= vmagic_bias_less_zero_point;
      vout1 -= vmagic_bias_less_zero_point;
      output[0] = (int8_t) vout0;
      output[1] = (int8_t) vout1;
      output += 2;
    }
    // Remainder: a single leftover channel (odd channel count). Taps live at
    // every other index of the interleaved layout; pointers and `w` are not
    // advanced, as this is the last channel of the row.
    if XNN_UNLIKELY(c != 0) {
      int32_t vacc = unaligned_load_s32(w);
      const int32_t vi0 = (int32_t) *i0;
      const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
      vacc += vi0 * vk0;
      const int32_t vi1 = (int32_t) *i1;
      const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
      vacc += vi1 * vk1;
      const int32_t vi2 = (int32_t) *i2;
      const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
      vacc += vi2 * vk2;
      const int32_t vi3 = (int32_t) *i3;
      const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6];
      vacc += vi3 * vk3;
      const int32_t vi4 = (int32_t) *i4;
      const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8];
      vacc += vi4 * vk4;
      const int32_t vi5 = (int32_t) *i5;
      const int32_t vk5 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10];
      vacc += vi5 * vk5;
      const int32_t vi6 = (int32_t) *i6;
      const int32_t vk6 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12];
      vacc += vi6 * vk6;
      const int32_t vi7 = (int32_t) *i7;
      const int32_t vk7 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14];
      vacc += vi7 * vk7;
      const int32_t vi8 = (int32_t) *i8;
      const int32_t vk8 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[16];
      vacc += vi8 * vk8;
      float vfpacc = (float) vacc * vscale;
      vfpacc += vmagic_bias;
      int32_t vout = (int32_t) float_as_uint32(vfpacc);
      vout = math_max_s32(vout, vmagic_min);
      vout = math_min_s32(vout, vmagic_max);
      vout -= vmagic_bias_less_zero_point;
      *output++ = (int8_t) vout;
    }
    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 8,886
| 33.988189
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-9p2c-minmax-fp32-scalar-lrintf.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QS8 depthwise-convolution microkernel (scalar, fp32 requantization rounded
// via lrintf): unipass over 9 kernel points ("9p"), 2 channels per main-loop
// iteration ("2c") with a 1-channel remainder.
// Weight layout per channel pair: 2 int32 biases, then the 18 int8 taps
// interleaved by channel (k0c0, k0c1, k1c0, k1c1, ...).
void xnn_qs8_dwconv_minmax_fp32_ukernel_9p2c__scalar_lrintf(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);
  const float vscale = params->fp32_scalar_lrintf.scale;
  // Clamp bounds carry the output zero point pre-subtracted; the zero point
  // is re-added after rounding.
  const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
  const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
  do {
    // Set up the 9 per-tap input row pointers. A pointer equal to `zero` is
    // the shared zero/padding row and must NOT be shifted by input_offset.
    const int8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
    }
    const int8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
    }
    const int8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
    }
    const int8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
    }
    const int8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
    }
    const int8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
    }
    const int8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
    }
    const int8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
    }
    const int8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
    }
    input = (const int8_t**) ((uintptr_t) input + input_stride);
    size_t c = channels;
    const void* w = weights;
    // Main loop: two channels per iteration.
    for (; c >= 2; c -= 2) {
      int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
      int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
      const int32_t vi0x0 = (int32_t) i0[0];
      const int32_t vi0x1 = (int32_t) i0[1];
      i0 += 2;
      const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
      const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1];
      vacc0 += vi0x0 * vk0x0;
      vacc1 += vi0x1 * vk0x1;
      const int32_t vi1x0 = (int32_t) i1[0];
      const int32_t vi1x1 = (int32_t) i1[1];
      i1 += 2;
      const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
      const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3];
      vacc0 += vi1x0 * vk1x0;
      vacc1 += vi1x1 * vk1x1;
      const int32_t vi2x0 = (int32_t) i2[0];
      const int32_t vi2x1 = (int32_t) i2[1];
      i2 += 2;
      const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
      const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5];
      vacc0 += vi2x0 * vk2x0;
      vacc1 += vi2x1 * vk2x1;
      const int32_t vi3x0 = (int32_t) i3[0];
      const int32_t vi3x1 = (int32_t) i3[1];
      i3 += 2;
      const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6];
      const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7];
      vacc0 += vi3x0 * vk3x0;
      vacc1 += vi3x1 * vk3x1;
      const int32_t vi4x0 = (int32_t) i4[0];
      const int32_t vi4x1 = (int32_t) i4[1];
      i4 += 2;
      const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8];
      const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9];
      vacc0 += vi4x0 * vk4x0;
      vacc1 += vi4x1 * vk4x1;
      const int32_t vi5x0 = (int32_t) i5[0];
      const int32_t vi5x1 = (int32_t) i5[1];
      i5 += 2;
      const int32_t vk5x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10];
      const int32_t vk5x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[11];
      vacc0 += vi5x0 * vk5x0;
      vacc1 += vi5x1 * vk5x1;
      const int32_t vi6x0 = (int32_t) i6[0];
      const int32_t vi6x1 = (int32_t) i6[1];
      i6 += 2;
      const int32_t vk6x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12];
      const int32_t vk6x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[13];
      vacc0 += vi6x0 * vk6x0;
      vacc1 += vi6x1 * vk6x1;
      const int32_t vi7x0 = (int32_t) i7[0];
      const int32_t vi7x1 = (int32_t) i7[1];
      i7 += 2;
      const int32_t vk7x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14];
      const int32_t vk7x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[15];
      vacc0 += vi7x0 * vk7x0;
      vacc1 += vi7x1 * vk7x1;
      const int32_t vi8x0 = (int32_t) i8[0];
      const int32_t vi8x1 = (int32_t) i8[1];
      i8 += 2;
      const int32_t vk8x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[16];
      const int32_t vk8x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[17];
      vacc0 += vi8x0 * vk8x0;
      vacc1 += vi8x1 * vk8x1;
      // Advance to the next channel pair's weights: 2 biases + 18 taps.
      w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 18 * sizeof(int8_t));
      // Requantize: scale in fp32, clamp, round via lrintf (uses the current
      // FP rounding mode — presumably round-to-nearest-even), add zero point.
      float vfpacc0 = (float) vacc0;
      float vfpacc1 = (float) vacc1;
      vfpacc0 *= vscale;
      vfpacc1 *= vscale;
      vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
      vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
      vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
      vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
      const int32_t vrndacc0 = (int32_t) lrintf(vfpacc0);
      const int32_t vrndacc1 = (int32_t) lrintf(vfpacc1);
      int32_t vout0 = (int32_t) vrndacc0 + voutput_zero_point;
      int32_t vout1 = (int32_t) vrndacc1 + voutput_zero_point;
      output[0] = (int8_t) vout0;
      output[1] = (int8_t) vout1;
      output += 2;
    }
    // Remainder: a single leftover channel (odd channel count). Taps live at
    // every other index of the interleaved layout; pointers and `w` are not
    // advanced, as this is the last channel of the row.
    if XNN_UNLIKELY(c != 0) {
      int32_t vacc = unaligned_load_s32(w);
      const int32_t vi0 = (int32_t) *i0;
      const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
      vacc += vi0 * vk0;
      const int32_t vi1 = (int32_t) *i1;
      const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
      vacc += vi1 * vk1;
      const int32_t vi2 = (int32_t) *i2;
      const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
      vacc += vi2 * vk2;
      const int32_t vi3 = (int32_t) *i3;
      const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6];
      vacc += vi3 * vk3;
      const int32_t vi4 = (int32_t) *i4;
      const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8];
      vacc += vi4 * vk4;
      const int32_t vi5 = (int32_t) *i5;
      const int32_t vk5 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10];
      vacc += vi5 * vk5;
      const int32_t vi6 = (int32_t) *i6;
      const int32_t vk6 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12];
      vacc += vi6 * vk6;
      const int32_t vi7 = (int32_t) *i7;
      const int32_t vk7 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14];
      vacc += vi7 * vk7;
      const int32_t vi8 = (int32_t) *i8;
      const int32_t vk8 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[16];
      vacc += vi8 * vk8;
      float vfpacc = (float) vacc * vscale;
      vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
      vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
      const int32_t vrndacc = (int32_t) lrintf(vfpacc);
      int32_t vout = vrndacc + voutput_zero_point;
      *output++ = (int8_t) vout;
    }
    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 8,965
| 34.864
| 98
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-9p2c-minmax-fp32-wasm-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_dwconv_minmax_fp32_ukernel_9p2c__wasm_fmagic(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float vscale = params->fp32_scalar_fmagic.scale;
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
const int8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
size_t c = channels;
const void* w = weights;
for (; c >= 2; c -= 2) {
int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6];
const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8];
const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10];
const int32_t vk5x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[11];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
const int32_t vi6x0 = (int32_t) i6[0];
const int32_t vi6x1 = (int32_t) i6[1];
i6 += 2;
const int32_t vk6x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12];
const int32_t vk6x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[13];
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
const int32_t vi7x0 = (int32_t) i7[0];
const int32_t vi7x1 = (int32_t) i7[1];
i7 += 2;
const int32_t vk7x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14];
const int32_t vk7x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[15];
vacc0 += vi7x0 * vk7x0;
vacc1 += vi7x1 * vk7x1;
const int32_t vi8x0 = (int32_t) i8[0];
const int32_t vi8x1 = (int32_t) i8[1];
i8 += 2;
const int32_t vk8x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[16];
const int32_t vk8x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[17];
vacc0 += vi8x0 * vk8x0;
vacc1 += vi8x1 * vk8x1;
w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 18 * sizeof(int8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
vfpacc0 *= vscale;
vfpacc1 *= vscale;
vfpacc0 = __builtin_wasm_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = __builtin_wasm_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc0 = __builtin_wasm_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = __builtin_wasm_min_f32(vfpacc1, voutput_max_less_zero_point);
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) *i0;
const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1;
const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2;
const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3;
const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4;
const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5;
const int32_t vk5 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6;
const int32_t vk6 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7;
const int32_t vk7 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14];
vacc += vi7 * vk7;
const int32_t vi8 = (int32_t) *i8;
const int32_t vk8 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[16];
vacc += vi8 * vk8;
float vfpacc = (float) vacc * vscale;
vfpacc = __builtin_wasm_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = __builtin_wasm_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
}
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 9,127
| 35.512
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-9p2c-minmax-rndnu-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_dwconv_minmax_rndnu_ukernel_9p2c__scalar(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);

  // Requantization parameters for the RNDNU (rounding fixed-point) scheme.
  const int32_t vmultiplier = params->rndnu_scalar.multiplier;
  const int64_t vrounding = params->rndnu_scalar.rounding;
  const uint32_t vshift = params->rndnu_scalar.shift;
  const int32_t voutput_min_less_zero_point = params->rndnu_scalar.output_min_less_zero_point;
  const int32_t voutput_max_less_zero_point = params->rndnu_scalar.output_max_less_zero_point;
  const int32_t voutput_zero_point = params->rndnu_scalar.output_zero_point;

  do {
    // Collect the 9 input row pointers for this output pixel. Rows other than
    // the shared zero-padding row are rebased by input_offset.
    const int8_t* in[9];
    for (size_t k = 0; k < 9; k++) {
      const int8_t* row = input[k];
      assert(row != NULL);
      if XNN_UNPREDICTABLE(row != zero) {
        row = (const int8_t*) ((uintptr_t) row + input_offset);
      }
      in[k] = row;
    }
    input = (const int8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
    // Main loop: two channels per iteration. The packed weights hold two
    // int32 biases followed by 9 pairs of int8 kernel taps.
    for (; c >= 2; c -= 2) {
      int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
      int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
      const int8_t* k8 = (const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t));
      for (size_t k = 0; k < 9; k++) {
        vacc0 += (int32_t) in[k][0] * (int32_t) k8[2 * k + 0];
        vacc1 += (int32_t) in[k][1] * (int32_t) k8[2 * k + 1];
        in[k] += 2;
      }
      w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 18 * sizeof(int8_t));

      // Requantize: widening multiply, add rounding term, arithmetic shift
      // right, clamp to the zero-point-relative range, then add zero point.
      int32_t vout0 = (int32_t) math_asr_s64(math_mulext_s32(vacc0, vmultiplier) + vrounding, vshift);
      int32_t vout1 = (int32_t) math_asr_s64(math_mulext_s32(vacc1, vmultiplier) + vrounding, vshift);
      vout0 = math_min_s32(math_max_s32(vout0, voutput_min_less_zero_point), voutput_max_less_zero_point);
      vout1 = math_min_s32(math_max_s32(vout1, voutput_min_less_zero_point), voutput_max_less_zero_point);
      output[0] = (int8_t) (vout0 + voutput_zero_point);
      output[1] = (int8_t) (vout1 + voutput_zero_point);
      output += 2;
    }
    // Remainder: one trailing channel (channels was odd).
    if XNN_UNLIKELY(c != 0) {
      int32_t vacc = unaligned_load_s32(w);
      const int8_t* k8 = (const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t));
      for (size_t k = 0; k < 9; k++) {
        vacc += (int32_t) *in[k] * (int32_t) k8[2 * k];
      }
      int32_t vout = (int32_t) math_asr_s64(math_mulext_s32(vacc, vmultiplier) + vrounding, vshift);
      vout = math_min_s32(math_max_s32(vout, voutput_min_less_zero_point), voutput_max_less_zero_point);
      *output++ = (int8_t) (vout + voutput_zero_point);
    }

    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 9,035
| 35.582996
| 98
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-9p32c-minmax-fp32-neonv8-mul16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-neon-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_qs8_dwconv_minmax_fp32_ukernel_9p32c__neonv8_mul16(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const float32x4_t vscale = vld1q_dup_f32(¶ms->fp32_neonv8.scale);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->fp32_neonv8.output_zero_point);
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->fp32_neonv8.output_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->fp32_neonv8.output_max);
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
const int8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
size_t c = channels;
const void* w = weights;
for (; c >= 32; c -= 32) {
int32x4_t vacc0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc89AB = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vaccCDEF = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vaccGHIJ = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vaccKLMN = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vaccOPQR = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vaccSTUV = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vi0x89ABCDEF = vmovl_s8(vld1_s8(i0)); i0 += 8;
const int16x8_t vk0x89ABCDEF = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vi0xGHIJKLMN = vmovl_s8(vld1_s8(i0)); i0 += 8;
const int16x8_t vk0xGHIJKLMN = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vi0xOPQRSTUV = vmovl_s8(vld1_s8(i0)); i0 += 8;
const int16x8_t vk0xOPQRSTUV = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi0x89ABCDEF), vget_low_s16(vk0x89ABCDEF));
vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi0x89ABCDEF), vget_high_s16(vk0x89ABCDEF));
vaccGHIJ = vmlal_s16(vaccGHIJ, vget_low_s16(vi0xGHIJKLMN), vget_low_s16(vk0xGHIJKLMN));
vaccKLMN = vmlal_s16(vaccKLMN, vget_high_s16(vi0xGHIJKLMN), vget_high_s16(vk0xGHIJKLMN));
vaccOPQR = vmlal_s16(vaccOPQR, vget_low_s16(vi0xOPQRSTUV), vget_low_s16(vk0xOPQRSTUV));
vaccSTUV = vmlal_s16(vaccSTUV, vget_high_s16(vi0xOPQRSTUV), vget_high_s16(vk0xOPQRSTUV));
const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vi1x89ABCDEF = vmovl_s8(vld1_s8(i1)); i1 += 8;
const int16x8_t vk1x89ABCDEF = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vi1xGHIJKLMN = vmovl_s8(vld1_s8(i1)); i1 += 8;
const int16x8_t vk1xGHIJKLMN = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vi1xOPQRSTUV = vmovl_s8(vld1_s8(i1)); i1 += 8;
const int16x8_t vk1xOPQRSTUV = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi1x89ABCDEF), vget_low_s16(vk1x89ABCDEF));
vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi1x89ABCDEF), vget_high_s16(vk1x89ABCDEF));
vaccGHIJ = vmlal_s16(vaccGHIJ, vget_low_s16(vi1xGHIJKLMN), vget_low_s16(vk1xGHIJKLMN));
vaccKLMN = vmlal_s16(vaccKLMN, vget_high_s16(vi1xGHIJKLMN), vget_high_s16(vk1xGHIJKLMN));
vaccOPQR = vmlal_s16(vaccOPQR, vget_low_s16(vi1xOPQRSTUV), vget_low_s16(vk1xOPQRSTUV));
vaccSTUV = vmlal_s16(vaccSTUV, vget_high_s16(vi1xOPQRSTUV), vget_high_s16(vk1xOPQRSTUV));
const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vi2x89ABCDEF = vmovl_s8(vld1_s8(i2)); i2 += 8;
const int16x8_t vk2x89ABCDEF = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vi2xGHIJKLMN = vmovl_s8(vld1_s8(i2)); i2 += 8;
const int16x8_t vk2xGHIJKLMN = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vi2xOPQRSTUV = vmovl_s8(vld1_s8(i2)); i2 += 8;
const int16x8_t vk2xOPQRSTUV = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi2x89ABCDEF), vget_low_s16(vk2x89ABCDEF));
vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi2x89ABCDEF), vget_high_s16(vk2x89ABCDEF));
vaccGHIJ = vmlal_s16(vaccGHIJ, vget_low_s16(vi2xGHIJKLMN), vget_low_s16(vk2xGHIJKLMN));
vaccKLMN = vmlal_s16(vaccKLMN, vget_high_s16(vi2xGHIJKLMN), vget_high_s16(vk2xGHIJKLMN));
vaccOPQR = vmlal_s16(vaccOPQR, vget_low_s16(vi2xOPQRSTUV), vget_low_s16(vk2xOPQRSTUV));
vaccSTUV = vmlal_s16(vaccSTUV, vget_high_s16(vi2xOPQRSTUV), vget_high_s16(vk2xOPQRSTUV));
const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vi3x89ABCDEF = vmovl_s8(vld1_s8(i3)); i3 += 8;
const int16x8_t vk3x89ABCDEF = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vi3xGHIJKLMN = vmovl_s8(vld1_s8(i3)); i3 += 8;
const int16x8_t vk3xGHIJKLMN = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vi3xOPQRSTUV = vmovl_s8(vld1_s8(i3)); i3 += 8;
const int16x8_t vk3xOPQRSTUV = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi3x89ABCDEF), vget_low_s16(vk3x89ABCDEF));
vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi3x89ABCDEF), vget_high_s16(vk3x89ABCDEF));
vaccGHIJ = vmlal_s16(vaccGHIJ, vget_low_s16(vi3xGHIJKLMN), vget_low_s16(vk3xGHIJKLMN));
vaccKLMN = vmlal_s16(vaccKLMN, vget_high_s16(vi3xGHIJKLMN), vget_high_s16(vk3xGHIJKLMN));
vaccOPQR = vmlal_s16(vaccOPQR, vget_low_s16(vi3xOPQRSTUV), vget_low_s16(vk3xOPQRSTUV));
vaccSTUV = vmlal_s16(vaccSTUV, vget_high_s16(vi3xOPQRSTUV), vget_high_s16(vk3xOPQRSTUV));
const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vi4x89ABCDEF = vmovl_s8(vld1_s8(i4)); i4 += 8;
const int16x8_t vk4x89ABCDEF = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vi4xGHIJKLMN = vmovl_s8(vld1_s8(i4)); i4 += 8;
const int16x8_t vk4xGHIJKLMN = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vi4xOPQRSTUV = vmovl_s8(vld1_s8(i4)); i4 += 8;
const int16x8_t vk4xOPQRSTUV = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi4x89ABCDEF), vget_low_s16(vk4x89ABCDEF));
vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi4x89ABCDEF), vget_high_s16(vk4x89ABCDEF));
vaccGHIJ = vmlal_s16(vaccGHIJ, vget_low_s16(vi4xGHIJKLMN), vget_low_s16(vk4xGHIJKLMN));
vaccKLMN = vmlal_s16(vaccKLMN, vget_high_s16(vi4xGHIJKLMN), vget_high_s16(vk4xGHIJKLMN));
vaccOPQR = vmlal_s16(vaccOPQR, vget_low_s16(vi4xOPQRSTUV), vget_low_s16(vk4xOPQRSTUV));
vaccSTUV = vmlal_s16(vaccSTUV, vget_high_s16(vi4xOPQRSTUV), vget_high_s16(vk4xOPQRSTUV));
const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5)); i5 += 8;
const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vi5x89ABCDEF = vmovl_s8(vld1_s8(i5)); i5 += 8;
const int16x8_t vk5x89ABCDEF = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vi5xGHIJKLMN = vmovl_s8(vld1_s8(i5)); i5 += 8;
const int16x8_t vk5xGHIJKLMN = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vi5xOPQRSTUV = vmovl_s8(vld1_s8(i5)); i5 += 8;
const int16x8_t vk5xOPQRSTUV = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi5x89ABCDEF), vget_low_s16(vk5x89ABCDEF));
vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi5x89ABCDEF), vget_high_s16(vk5x89ABCDEF));
vaccGHIJ = vmlal_s16(vaccGHIJ, vget_low_s16(vi5xGHIJKLMN), vget_low_s16(vk5xGHIJKLMN));
vaccKLMN = vmlal_s16(vaccKLMN, vget_high_s16(vi5xGHIJKLMN), vget_high_s16(vk5xGHIJKLMN));
vaccOPQR = vmlal_s16(vaccOPQR, vget_low_s16(vi5xOPQRSTUV), vget_low_s16(vk5xOPQRSTUV));
vaccSTUV = vmlal_s16(vaccSTUV, vget_high_s16(vi5xOPQRSTUV), vget_high_s16(vk5xOPQRSTUV));
const int16x8_t vi6x01234567 = vmovl_s8(vld1_s8(i6)); i6 += 8;
const int16x8_t vk6x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vi6x89ABCDEF = vmovl_s8(vld1_s8(i6)); i6 += 8;
const int16x8_t vk6x89ABCDEF = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vi6xGHIJKLMN = vmovl_s8(vld1_s8(i6)); i6 += 8;
const int16x8_t vk6xGHIJKLMN = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vi6xOPQRSTUV = vmovl_s8(vld1_s8(i6)); i6 += 8;
const int16x8_t vk6xOPQRSTUV = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi6x89ABCDEF), vget_low_s16(vk6x89ABCDEF));
vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi6x89ABCDEF), vget_high_s16(vk6x89ABCDEF));
vaccGHIJ = vmlal_s16(vaccGHIJ, vget_low_s16(vi6xGHIJKLMN), vget_low_s16(vk6xGHIJKLMN));
vaccKLMN = vmlal_s16(vaccKLMN, vget_high_s16(vi6xGHIJKLMN), vget_high_s16(vk6xGHIJKLMN));
vaccOPQR = vmlal_s16(vaccOPQR, vget_low_s16(vi6xOPQRSTUV), vget_low_s16(vk6xOPQRSTUV));
vaccSTUV = vmlal_s16(vaccSTUV, vget_high_s16(vi6xOPQRSTUV), vget_high_s16(vk6xOPQRSTUV));
const int16x8_t vi7x01234567 = vmovl_s8(vld1_s8(i7)); i7 += 8;
const int16x8_t vk7x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vi7x89ABCDEF = vmovl_s8(vld1_s8(i7)); i7 += 8;
const int16x8_t vk7x89ABCDEF = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vi7xGHIJKLMN = vmovl_s8(vld1_s8(i7)); i7 += 8;
const int16x8_t vk7xGHIJKLMN = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vi7xOPQRSTUV = vmovl_s8(vld1_s8(i7)); i7 += 8;
const int16x8_t vk7xOPQRSTUV = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));
vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi7x89ABCDEF), vget_low_s16(vk7x89ABCDEF));
vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi7x89ABCDEF), vget_high_s16(vk7x89ABCDEF));
vaccGHIJ = vmlal_s16(vaccGHIJ, vget_low_s16(vi7xGHIJKLMN), vget_low_s16(vk7xGHIJKLMN));
vaccKLMN = vmlal_s16(vaccKLMN, vget_high_s16(vi7xGHIJKLMN), vget_high_s16(vk7xGHIJKLMN));
vaccOPQR = vmlal_s16(vaccOPQR, vget_low_s16(vi7xOPQRSTUV), vget_low_s16(vk7xOPQRSTUV));
vaccSTUV = vmlal_s16(vaccSTUV, vget_high_s16(vi7xOPQRSTUV), vget_high_s16(vk7xOPQRSTUV));
const int16x8_t vi8x01234567 = vmovl_s8(vld1_s8(i8)); i8 += 8;
const int16x8_t vk8x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vi8x89ABCDEF = vmovl_s8(vld1_s8(i8)); i8 += 8;
const int16x8_t vk8x89ABCDEF = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vi8xGHIJKLMN = vmovl_s8(vld1_s8(i8)); i8 += 8;
const int16x8_t vk8xGHIJKLMN = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vi8xOPQRSTUV = vmovl_s8(vld1_s8(i8)); i8 += 8;
const int16x8_t vk8xOPQRSTUV = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi8x01234567), vget_low_s16(vk8x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi8x01234567), vget_high_s16(vk8x01234567));
vacc89AB = vmlal_s16(vacc89AB, vget_low_s16(vi8x89ABCDEF), vget_low_s16(vk8x89ABCDEF));
vaccCDEF = vmlal_s16(vaccCDEF, vget_high_s16(vi8x89ABCDEF), vget_high_s16(vk8x89ABCDEF));
vaccGHIJ = vmlal_s16(vaccGHIJ, vget_low_s16(vi8xGHIJKLMN), vget_low_s16(vk8xGHIJKLMN));
vaccKLMN = vmlal_s16(vaccKLMN, vget_high_s16(vi8xGHIJKLMN), vget_high_s16(vk8xGHIJKLMN));
vaccOPQR = vmlal_s16(vaccOPQR, vget_low_s16(vi8xOPQRSTUV), vget_low_s16(vk8xOPQRSTUV));
vaccSTUV = vmlal_s16(vaccSTUV, vget_high_s16(vi8xOPQRSTUV), vget_high_s16(vk8xOPQRSTUV));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
float32x4_t vfpacc89AB = vcvtq_f32_s32(vacc89AB);
float32x4_t vfpaccCDEF = vcvtq_f32_s32(vaccCDEF);
float32x4_t vfpaccGHIJ = vcvtq_f32_s32(vaccGHIJ);
float32x4_t vfpaccKLMN = vcvtq_f32_s32(vaccKLMN);
float32x4_t vfpaccOPQR = vcvtq_f32_s32(vaccOPQR);
float32x4_t vfpaccSTUV = vcvtq_f32_s32(vaccSTUV);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vfpacc89AB = vmulq_f32(vfpacc89AB, vscale);
vfpaccCDEF = vmulq_f32(vfpaccCDEF, vscale);
vfpaccGHIJ = vmulq_f32(vfpaccGHIJ, vscale);
vfpaccKLMN = vmulq_f32(vfpaccKLMN, vscale);
vfpaccOPQR = vmulq_f32(vfpaccOPQR, vscale);
vfpaccSTUV = vmulq_f32(vfpaccSTUV, vscale);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
vacc89AB = vcvtnq_s32_f32(vfpacc89AB);
vaccCDEF = vcvtnq_s32_f32(vfpaccCDEF);
vaccGHIJ = vcvtnq_s32_f32(vfpaccGHIJ);
vaccKLMN = vcvtnq_s32_f32(vfpaccKLMN);
vaccOPQR = vcvtnq_s32_f32(vfpaccOPQR);
vaccSTUV = vcvtnq_s32_f32(vfpaccSTUV);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
int16x8_t vaccGHIJKLMN = vqmovn_high_s32(vqmovn_s32(vaccGHIJ), vaccKLMN);
int16x8_t vaccOPQRSTUV = vqmovn_high_s32(vqmovn_s32(vaccOPQR), vaccSTUV);
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
vaccGHIJKLMN = vqaddq_s16(vaccGHIJKLMN, voutput_zero_point);
vaccOPQRSTUV = vqaddq_s16(vaccOPQRSTUV, voutput_zero_point);
int8x16_t vout0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc01234567), vacc89ABCDEF);
int8x16_t voutGHIJKLMNOPQRSTUV = vqmovn_high_s16(vqmovn_s16(vaccGHIJKLMN), vaccOPQRSTUV);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
int16x8_t vaccGHIJKLMN = vcombine_s16(vqmovn_s32(vaccGHIJ), vqmovn_s32(vaccKLMN));
int16x8_t vaccOPQRSTUV = vcombine_s16(vqmovn_s32(vaccOPQR), vqmovn_s32(vaccSTUV));
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
vaccGHIJKLMN = vqaddq_s16(vaccGHIJKLMN, voutput_zero_point);
vaccOPQRSTUV = vqaddq_s16(vaccOPQRSTUV, voutput_zero_point);
int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
int8x16_t voutGHIJKLMNOPQRSTUV = vcombine_s8(vqmovn_s16(vaccGHIJKLMN), vqmovn_s16(vaccOPQRSTUV));
#endif // !XNN_ARCH_ARM64
vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMNOPQRSTUV = vmaxq_s8(voutGHIJKLMNOPQRSTUV, voutput_min);
vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMNOPQRSTUV = vminq_s8(voutGHIJKLMNOPQRSTUV, voutput_max);
vst1q_s8(output, vout0123456789ABCDEF); output += 16;
vst1q_s8(output, voutGHIJKLMNOPQRSTUV); output += 16;
}
if XNN_UNLIKELY(c != 0) {
const int8_t* k = (const int8_t*) ((const int32_t*) w + 32);
do {
int32x4_t vacc0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(k)); k += 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8((const void*) (k + 24)));
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8((const void*) (k + 56)));
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8((const void*) (k + 88)));
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8((const void*) (k + 120)));
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5)); i5 += 8;
const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8((const void*) (k + 152)));
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
const int16x8_t vi6x01234567 = vmovl_s8(vld1_s8(i6)); i6 += 8;
const int16x8_t vk6x01234567 = vmovl_s8(vld1_s8((const void*) (k + 184)));
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
const int16x8_t vi7x01234567 = vmovl_s8(vld1_s8(i7)); i7 += 8;
const int16x8_t vk7x01234567 = vmovl_s8(vld1_s8((const void*) (k + 216)));
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));
const int16x8_t vi8x01234567 = vmovl_s8(vld1_s8(i8)); i8 += 8;
const int16x8_t vk8x01234567 = vmovl_s8(vld1_s8((const void*) (k + 248)));
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi8x01234567), vget_low_s16(vk8x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi8x01234567), vget_high_s16(vk8x01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));
if XNN_LIKELY(c >= 8) {
vst1_s8(output, vout01234567); output += 8;
c -= 8;
} else {
if (c & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (c & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (c & 1) {
vst1_lane_s8(output, vout01234567, 0); output += 1;
}
c = 0;
}
} while (c != 0);
}
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 24,769
| 58.116945
| 103
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-9p4c-minmax-fp32-scalar-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
// Depthwise-convolution microkernel: 9 kernel taps (9p), 4 channels per main-
// loop iteration (4c), QS8 input/output, min/max activation, FP32
// requantization using the "fmagic" (magic-bias float-to-int) rounding trick.
//
// Weight layout per group of 4 channels: 4 int32 biases followed by 9x4 int8
// kernel taps, tap-major (4 channel values for tap 0, then 4 for tap 1, ...).
//
// `input` holds 9 row pointers per output pixel; rows equal to the shared
// `zero` buffer represent padding and are used without `input_offset`.
void xnn_qs8_dwconv_minmax_fp32_ukernel_9p4c__scalar_fmagic(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);

  // Requantization constants precomputed by the parameter-initialization code.
  const float vscale = params->fp32_scalar_fmagic.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
  const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
  const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
  do {
    // Resolve the 9 input row pointers for this output pixel.
    const int8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
    }
    const int8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
    }
    const int8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
    }
    const int8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
    }
    const int8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
    }
    const int8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
    }
    const int8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
    }
    const int8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
    }
    const int8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
    }
    input = (const int8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
    // Main loop: 4 channels per iteration.
    for (; c >= 4; c -= 4) {
      // 4 int32 biases seed the accumulators; the taps follow immediately.
      const int32_t* b = (const int32_t*) w;
      int32_t vacc0 = b[0];
      int32_t vacc1 = b[1];
      int32_t vacc2 = b[2];
      int32_t vacc3 = b[3];
      const int8_t* k = (const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t));

      vacc0 += (int32_t) i0[0] * (int32_t) k[0];
      vacc1 += (int32_t) i0[1] * (int32_t) k[1];
      vacc2 += (int32_t) i0[2] * (int32_t) k[2];
      vacc3 += (int32_t) i0[3] * (int32_t) k[3];
      i0 += 4;
      vacc0 += (int32_t) i1[0] * (int32_t) k[4];
      vacc1 += (int32_t) i1[1] * (int32_t) k[5];
      vacc2 += (int32_t) i1[2] * (int32_t) k[6];
      vacc3 += (int32_t) i1[3] * (int32_t) k[7];
      i1 += 4;
      vacc0 += (int32_t) i2[0] * (int32_t) k[8];
      vacc1 += (int32_t) i2[1] * (int32_t) k[9];
      vacc2 += (int32_t) i2[2] * (int32_t) k[10];
      vacc3 += (int32_t) i2[3] * (int32_t) k[11];
      i2 += 4;
      vacc0 += (int32_t) i3[0] * (int32_t) k[12];
      vacc1 += (int32_t) i3[1] * (int32_t) k[13];
      vacc2 += (int32_t) i3[2] * (int32_t) k[14];
      vacc3 += (int32_t) i3[3] * (int32_t) k[15];
      i3 += 4;
      vacc0 += (int32_t) i4[0] * (int32_t) k[16];
      vacc1 += (int32_t) i4[1] * (int32_t) k[17];
      vacc2 += (int32_t) i4[2] * (int32_t) k[18];
      vacc3 += (int32_t) i4[3] * (int32_t) k[19];
      i4 += 4;
      vacc0 += (int32_t) i5[0] * (int32_t) k[20];
      vacc1 += (int32_t) i5[1] * (int32_t) k[21];
      vacc2 += (int32_t) i5[2] * (int32_t) k[22];
      vacc3 += (int32_t) i5[3] * (int32_t) k[23];
      i5 += 4;
      vacc0 += (int32_t) i6[0] * (int32_t) k[24];
      vacc1 += (int32_t) i6[1] * (int32_t) k[25];
      vacc2 += (int32_t) i6[2] * (int32_t) k[26];
      vacc3 += (int32_t) i6[3] * (int32_t) k[27];
      i6 += 4;
      vacc0 += (int32_t) i7[0] * (int32_t) k[28];
      vacc1 += (int32_t) i7[1] * (int32_t) k[29];
      vacc2 += (int32_t) i7[2] * (int32_t) k[30];
      vacc3 += (int32_t) i7[3] * (int32_t) k[31];
      i7 += 4;
      vacc0 += (int32_t) i8[0] * (int32_t) k[32];
      vacc1 += (int32_t) i8[1] * (int32_t) k[33];
      vacc2 += (int32_t) i8[2] * (int32_t) k[34];
      vacc3 += (int32_t) i8[3] * (int32_t) k[35];
      i8 += 4;

      w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t) + 36 * sizeof(int8_t));

      // FP32 requantization: scale, clamp to the activation range (expressed
      // relative to the output zero point), then add the magic bias so that
      // the rounded integer can be read out of the float's bit pattern.
      float vfpacc0 = (float) vacc0 * vscale;
      float vfpacc1 = (float) vacc1 * vscale;
      float vfpacc2 = (float) vacc2 * vscale;
      float vfpacc3 = (float) vacc3 * vscale;

      vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
      vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
      vfpacc2 = math_max_f32(vfpacc2, voutput_min_less_zero_point);
      vfpacc3 = math_max_f32(vfpacc3, voutput_min_less_zero_point);

      vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
      vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
      vfpacc2 = math_min_f32(vfpacc2, voutput_max_less_zero_point);
      vfpacc3 = math_min_f32(vfpacc3, voutput_max_less_zero_point);

      vfpacc0 += vmagic_bias;
      vfpacc1 += vmagic_bias;
      vfpacc2 += vmagic_bias;
      vfpacc3 += vmagic_bias;

      const int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
      const int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
      const int32_t vout2 = (int32_t) float_as_uint32(vfpacc2) - vmagic_bias_less_output_zero_point;
      const int32_t vout3 = (int32_t) float_as_uint32(vfpacc3) - vmagic_bias_less_output_zero_point;

      output[0] = (int8_t) vout0;
      output[1] = (int8_t) vout1;
      output[2] = (int8_t) vout2;
      output[3] = (int8_t) vout3;
      output += 4;
    }
    // Remainder loop: the last 1-3 channels, one at a time. Channel j of the
    // final group uses taps k[0], k[4], ..., k[32] after j increments of k.
    if XNN_UNLIKELY(c != 0) {
      const int8_t* k = (const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t));
      do {
        int32_t vacc = *((const int32_t*) w);
        w = (const void*) ((uintptr_t) w + sizeof(int32_t));
        vacc += (int32_t) *i0++ * (int32_t) k[0];
        vacc += (int32_t) *i1++ * (int32_t) k[4];
        vacc += (int32_t) *i2++ * (int32_t) k[8];
        vacc += (int32_t) *i3++ * (int32_t) k[12];
        vacc += (int32_t) *i4++ * (int32_t) k[16];
        vacc += (int32_t) *i5++ * (int32_t) k[20];
        vacc += (int32_t) *i6++ * (int32_t) k[24];
        vacc += (int32_t) *i7++ * (int32_t) k[28];
        vacc += (int32_t) *i8++ * (int32_t) k[32];
        k += 1;

        float vfpacc = (float) vacc * vscale;
        vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
        vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
        vfpacc += vmagic_bias;
        const int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
        *output++ = (int8_t) vout;
      } while (--c != 0);
    }
    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 12,755
| 38.37037
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-9p4c-minmax-fp32-scalar-imagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
// Depthwise-convolution microkernel: 9 kernel taps (9p), 4 channels per main-
// loop iteration (4c), QS8 input/output, min/max activation, FP32
// requantization using the "imagic" variant of the magic-bias trick: the
// min/max clamp is applied in the integer domain after the bit-pattern read.
//
// Weight layout per group of 4 channels: 4 int32 biases followed by 9x4 int8
// kernel taps, tap-major (4 channel values for tap 0, then 4 for tap 1, ...).
//
// `input` holds 9 row pointers per output pixel; rows equal to the shared
// `zero` buffer represent padding and are used without `input_offset`.
void xnn_qs8_dwconv_minmax_fp32_ukernel_9p4c__scalar_imagic(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);

  // Requantization constants precomputed by the parameter-initialization code.
  const float vscale = params->fp32_scalar_imagic.scale;
  const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
  const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
  const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
  const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
  do {
    // Resolve the 9 input row pointers for this output pixel.
    const int8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
    }
    const int8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
    }
    const int8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
    }
    const int8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
    }
    const int8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
    }
    const int8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
    }
    const int8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
    }
    const int8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
    }
    const int8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
    }
    input = (const int8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
    // Main loop: 4 channels per iteration.
    for (; c >= 4; c -= 4) {
      // 4 int32 biases seed the accumulators; the taps follow immediately.
      const int32_t* b = (const int32_t*) w;
      int32_t vacc0 = b[0];
      int32_t vacc1 = b[1];
      int32_t vacc2 = b[2];
      int32_t vacc3 = b[3];
      const int8_t* k = (const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t));

      vacc0 += (int32_t) i0[0] * (int32_t) k[0];
      vacc1 += (int32_t) i0[1] * (int32_t) k[1];
      vacc2 += (int32_t) i0[2] * (int32_t) k[2];
      vacc3 += (int32_t) i0[3] * (int32_t) k[3];
      i0 += 4;
      vacc0 += (int32_t) i1[0] * (int32_t) k[4];
      vacc1 += (int32_t) i1[1] * (int32_t) k[5];
      vacc2 += (int32_t) i1[2] * (int32_t) k[6];
      vacc3 += (int32_t) i1[3] * (int32_t) k[7];
      i1 += 4;
      vacc0 += (int32_t) i2[0] * (int32_t) k[8];
      vacc1 += (int32_t) i2[1] * (int32_t) k[9];
      vacc2 += (int32_t) i2[2] * (int32_t) k[10];
      vacc3 += (int32_t) i2[3] * (int32_t) k[11];
      i2 += 4;
      vacc0 += (int32_t) i3[0] * (int32_t) k[12];
      vacc1 += (int32_t) i3[1] * (int32_t) k[13];
      vacc2 += (int32_t) i3[2] * (int32_t) k[14];
      vacc3 += (int32_t) i3[3] * (int32_t) k[15];
      i3 += 4;
      vacc0 += (int32_t) i4[0] * (int32_t) k[16];
      vacc1 += (int32_t) i4[1] * (int32_t) k[17];
      vacc2 += (int32_t) i4[2] * (int32_t) k[18];
      vacc3 += (int32_t) i4[3] * (int32_t) k[19];
      i4 += 4;
      vacc0 += (int32_t) i5[0] * (int32_t) k[20];
      vacc1 += (int32_t) i5[1] * (int32_t) k[21];
      vacc2 += (int32_t) i5[2] * (int32_t) k[22];
      vacc3 += (int32_t) i5[3] * (int32_t) k[23];
      i5 += 4;
      vacc0 += (int32_t) i6[0] * (int32_t) k[24];
      vacc1 += (int32_t) i6[1] * (int32_t) k[25];
      vacc2 += (int32_t) i6[2] * (int32_t) k[26];
      vacc3 += (int32_t) i6[3] * (int32_t) k[27];
      i6 += 4;
      vacc0 += (int32_t) i7[0] * (int32_t) k[28];
      vacc1 += (int32_t) i7[1] * (int32_t) k[29];
      vacc2 += (int32_t) i7[2] * (int32_t) k[30];
      vacc3 += (int32_t) i7[3] * (int32_t) k[31];
      i7 += 4;
      vacc0 += (int32_t) i8[0] * (int32_t) k[32];
      vacc1 += (int32_t) i8[1] * (int32_t) k[33];
      vacc2 += (int32_t) i8[2] * (int32_t) k[34];
      vacc3 += (int32_t) i8[3] * (int32_t) k[35];
      i8 += 4;

      w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t) + 36 * sizeof(int8_t));

      // Requantization: scale in FP32, add the magic bias, read the integer
      // out of the float bit pattern, then clamp and remove the bias offset
      // in the integer domain.
      float vfpacc0 = (float) vacc0 * vscale;
      float vfpacc1 = (float) vacc1 * vscale;
      float vfpacc2 = (float) vacc2 * vscale;
      float vfpacc3 = (float) vacc3 * vscale;

      vfpacc0 += vmagic_bias;
      vfpacc1 += vmagic_bias;
      vfpacc2 += vmagic_bias;
      vfpacc3 += vmagic_bias;

      int32_t vout0 = (int32_t) float_as_uint32(vfpacc0);
      int32_t vout1 = (int32_t) float_as_uint32(vfpacc1);
      int32_t vout2 = (int32_t) float_as_uint32(vfpacc2);
      int32_t vout3 = (int32_t) float_as_uint32(vfpacc3);

      vout0 = math_max_s32(vout0, vmagic_min);
      vout1 = math_max_s32(vout1, vmagic_min);
      vout2 = math_max_s32(vout2, vmagic_min);
      vout3 = math_max_s32(vout3, vmagic_min);

      vout0 = math_min_s32(vout0, vmagic_max);
      vout1 = math_min_s32(vout1, vmagic_max);
      vout2 = math_min_s32(vout2, vmagic_max);
      vout3 = math_min_s32(vout3, vmagic_max);

      vout0 -= vmagic_bias_less_zero_point;
      vout1 -= vmagic_bias_less_zero_point;
      vout2 -= vmagic_bias_less_zero_point;
      vout3 -= vmagic_bias_less_zero_point;

      output[0] = (int8_t) vout0;
      output[1] = (int8_t) vout1;
      output[2] = (int8_t) vout2;
      output[3] = (int8_t) vout3;
      output += 4;
    }
    // Remainder loop: the last 1-3 channels, one at a time. Channel j of the
    // final group uses taps k[0], k[4], ..., k[32] after j increments of k.
    if XNN_UNLIKELY(c != 0) {
      const int8_t* k = (const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t));
      do {
        int32_t vacc = *((const int32_t*) w);
        w = (const void*) ((uintptr_t) w + sizeof(int32_t));
        vacc += (int32_t) *i0++ * (int32_t) k[0];
        vacc += (int32_t) *i1++ * (int32_t) k[4];
        vacc += (int32_t) *i2++ * (int32_t) k[8];
        vacc += (int32_t) *i3++ * (int32_t) k[12];
        vacc += (int32_t) *i4++ * (int32_t) k[16];
        vacc += (int32_t) *i5++ * (int32_t) k[20];
        vacc += (int32_t) *i6++ * (int32_t) k[24];
        vacc += (int32_t) *i7++ * (int32_t) k[28];
        vacc += (int32_t) *i8++ * (int32_t) k[32];
        k += 1;

        float vfpacc = (float) vacc * vscale;
        vfpacc += vmagic_bias;
        int32_t vout = (int32_t) float_as_uint32(vfpacc);
        vout = math_max_s32(vout, vmagic_min);
        vout = math_min_s32(vout, vmagic_max);
        vout -= vmagic_bias_less_zero_point;
        *output++ = (int8_t) vout;
      } while (--c != 0);
    }
    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 12,504
| 36.893939
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-9p4c-minmax-fp32-scalar-lrintf.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_qs8_dwconv_minmax_fp32_ukernel_9p4c__scalar_lrintf(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float vscale = params->fp32_scalar_lrintf.scale;
const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
const int8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
size_t c = channels;
const void* w = weights;
for (; c >= 4; c -= 4) {
int32_t vacc0 = ((const int32_t*) w)[0];
int32_t vacc1 = ((const int32_t*) w)[1];
int32_t vacc2 = ((const int32_t*) w)[2];
int32_t vacc3 = ((const int32_t*) w)[3];
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[0];
const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[1];
const int32_t vk0x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[2];
const int32_t vk0x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[3];
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) i1[0];
const int32_t vi1x1 = (int32_t) i1[1];
const int32_t vi1x2 = (int32_t) i1[2];
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[4];
const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[5];
const int32_t vk1x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[6];
const int32_t vk1x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[7];
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) i2[0];
const int32_t vi2x1 = (int32_t) i2[1];
const int32_t vi2x2 = (int32_t) i2[2];
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[8];
const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[9];
const int32_t vk2x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[10];
const int32_t vk2x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[11];
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) i3[0];
const int32_t vi3x1 = (int32_t) i3[1];
const int32_t vi3x2 = (int32_t) i3[2];
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[12];
const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[13];
const int32_t vk3x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[14];
const int32_t vk3x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[15];
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) i4[0];
const int32_t vi4x1 = (int32_t) i4[1];
const int32_t vi4x2 = (int32_t) i4[2];
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[16];
const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[17];
const int32_t vk4x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[18];
const int32_t vk4x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[19];
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
const int32_t vi5x0 = (int32_t) i5[0];
const int32_t vi5x1 = (int32_t) i5[1];
const int32_t vi5x2 = (int32_t) i5[2];
const int32_t vi5x3 = (int32_t) i5[3];
i5 += 4;
const int32_t vk5x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[20];
const int32_t vk5x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[21];
const int32_t vk5x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[22];
const int32_t vk5x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[23];
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
vacc2 += vi5x2 * vk5x2;
vacc3 += vi5x3 * vk5x3;
const int32_t vi6x0 = (int32_t) i6[0];
const int32_t vi6x1 = (int32_t) i6[1];
const int32_t vi6x2 = (int32_t) i6[2];
const int32_t vi6x3 = (int32_t) i6[3];
i6 += 4;
const int32_t vk6x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[24];
const int32_t vk6x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[25];
const int32_t vk6x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[26];
const int32_t vk6x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[27];
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
vacc2 += vi6x2 * vk6x2;
vacc3 += vi6x3 * vk6x3;
const int32_t vi7x0 = (int32_t) i7[0];
const int32_t vi7x1 = (int32_t) i7[1];
const int32_t vi7x2 = (int32_t) i7[2];
const int32_t vi7x3 = (int32_t) i7[3];
i7 += 4;
const int32_t vk7x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[28];
const int32_t vk7x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[29];
const int32_t vk7x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[30];
const int32_t vk7x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[31];
vacc0 += vi7x0 * vk7x0;
vacc1 += vi7x1 * vk7x1;
vacc2 += vi7x2 * vk7x2;
vacc3 += vi7x3 * vk7x3;
const int32_t vi8x0 = (int32_t) i8[0];
const int32_t vi8x1 = (int32_t) i8[1];
const int32_t vi8x2 = (int32_t) i8[2];
const int32_t vi8x3 = (int32_t) i8[3];
i8 += 4;
const int32_t vk8x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[32];
const int32_t vk8x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[33];
const int32_t vk8x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[34];
const int32_t vk8x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[35];
vacc0 += vi8x0 * vk8x0;
vacc1 += vi8x1 * vk8x1;
vacc2 += vi8x2 * vk8x2;
vacc3 += vi8x3 * vk8x3;
w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t) + 36 * sizeof(int8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
float vfpacc2 = (float) vacc2;
float vfpacc3 = (float) vacc3;
vfpacc0 *= vscale;
vfpacc1 *= vscale;
vfpacc2 *= vscale;
vfpacc3 *= vscale;
vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc2 = math_max_f32(vfpacc2, voutput_min_less_zero_point);
vfpacc3 = math_max_f32(vfpacc3, voutput_min_less_zero_point);
vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
vfpacc2 = math_min_f32(vfpacc2, voutput_max_less_zero_point);
vfpacc3 = math_min_f32(vfpacc3, voutput_max_less_zero_point);
const int32_t vrndacc0 = (int32_t) lrintf(vfpacc0);
const int32_t vrndacc1 = (int32_t) lrintf(vfpacc1);
const int32_t vrndacc2 = (int32_t) lrintf(vfpacc2);
const int32_t vrndacc3 = (int32_t) lrintf(vfpacc3);
int32_t vout0 = (int32_t) vrndacc0 + voutput_zero_point;
int32_t vout1 = (int32_t) vrndacc1 + voutput_zero_point;
int32_t vout2 = (int32_t) vrndacc2 + voutput_zero_point;
int32_t vout3 = (int32_t) vrndacc3 + voutput_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output[2] = (int8_t) vout2;
output[3] = (int8_t) vout3;
output += 4;
}
if XNN_UNLIKELY(c != 0) {
const int8_t* k = (const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t));
do {
int32_t vacc = *((const int32_t*) w);
w = (const void*) ((uintptr_t) w + sizeof(int32_t));
const int32_t vi0 = (int32_t) *i0++;
const int32_t vk0 = (int32_t) k[0];
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) *i1++;
const int32_t vk1 = (int32_t) k[4];
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) *i2++;
const int32_t vk2 = (int32_t) k[8];
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) *i3++;
const int32_t vk3 = (int32_t) k[12];
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) *i4++;
const int32_t vk4 = (int32_t) k[16];
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) *i5++;
const int32_t vk5 = (int32_t) k[20];
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) *i6++;
const int32_t vk6 = (int32_t) k[24];
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) *i7++;
const int32_t vk7 = (int32_t) k[28];
vacc += vi7 * vk7;
const int32_t vi8 = (int32_t) *i8++;
const int32_t vk8 = (int32_t) k[32];
vacc += vi8 * vk8;
k += 1;
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
const int32_t vrndacc = (int32_t) lrintf(vfpacc);
int32_t vout = vrndacc + voutput_zero_point;
*output++ = (int8_t) vout;
} while (--c != 0);
}
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 12,643
| 38.024691
| 98
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-9p4c-minmax-fp32-wasm-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
// QS8 depthwise-convolution microkernel: single pass ("unipass") over a
// 9-tap kernel ("9p"), 4 channels per main-loop iteration ("4c"), scalar
// arithmetic using WASM min/max builtins, with fp32 requantization via the
// floating-point "magic bias" rounding trick.
//
// Weight layout per group of 4 channels (as read below): 4 int32 biases
// followed by 36 int8 kernel taps stored tap-major, i.e. tap t / channel c
// at byte index t*4 + c.
//
// channels          - number of channels per output pixel (inner trip count)
// output_width      - number of output pixels to produce
// input             - per-pixel array of 9 input-row pointers
// weights           - packed bias + kernel data (layout above)
// output            - int8 output pointer
// input_stride      - byte stride between consecutive pixels' pointer sets
// output_increment  - bytes to advance `output` after each pixel
// input_offset      - byte offset applied to every non-padding input pointer
// zero              - shared zero buffer marking padding rows
// params            - requantization parameters (fp32_scalar_fmagic variant)
void xnn_qs8_dwconv_minmax_fp32_ukernel_9p4c__wasm_fmagic(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);

  // Requantization constants. The min/max bounds are pre-offset by
  // -output_zero_point so clamping happens before the zero point is added
  // back (folded into vmagic_bias_less_output_zero_point).
  const float vscale = params->fp32_scalar_fmagic.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
  const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
  const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
  do {
    // Fetch the 9 input-row pointers for this output pixel. A pointer equal
    // to the shared `zero` buffer is a padding row and is not offset.
    const int8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
    }
    const int8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
    }
    const int8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
    }
    const int8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
    }
    const int8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
    }
    const int8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
    }
    const int8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
    }
    const int8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
    }
    const int8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
    }
    input = (const int8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
    // Main loop: process 4 channels per iteration.
    for (; c >= 4; c -= 4) {
      // Start accumulators from the 4 per-channel int32 biases.
      int32_t vacc0 = ((const int32_t*) w)[0];
      int32_t vacc1 = ((const int32_t*) w)[1];
      int32_t vacc2 = ((const int32_t*) w)[2];
      int32_t vacc3 = ((const int32_t*) w)[3];

      // Tap 0: kernel bytes [0..3] follow the 4 biases.
      const int32_t vi0x0 = (int32_t) i0[0];
      const int32_t vi0x1 = (int32_t) i0[1];
      const int32_t vi0x2 = (int32_t) i0[2];
      const int32_t vi0x3 = (int32_t) i0[3];
      i0 += 4;
      const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[0];
      const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[1];
      const int32_t vk0x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[2];
      const int32_t vk0x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[3];
      vacc0 += vi0x0 * vk0x0;
      vacc1 += vi0x1 * vk0x1;
      vacc2 += vi0x2 * vk0x2;
      vacc3 += vi0x3 * vk0x3;

      // Tap 1: kernel bytes [4..7].
      const int32_t vi1x0 = (int32_t) i1[0];
      const int32_t vi1x1 = (int32_t) i1[1];
      const int32_t vi1x2 = (int32_t) i1[2];
      const int32_t vi1x3 = (int32_t) i1[3];
      i1 += 4;
      const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[4];
      const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[5];
      const int32_t vk1x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[6];
      const int32_t vk1x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[7];
      vacc0 += vi1x0 * vk1x0;
      vacc1 += vi1x1 * vk1x1;
      vacc2 += vi1x2 * vk1x2;
      vacc3 += vi1x3 * vk1x3;

      // Tap 2: kernel bytes [8..11].
      const int32_t vi2x0 = (int32_t) i2[0];
      const int32_t vi2x1 = (int32_t) i2[1];
      const int32_t vi2x2 = (int32_t) i2[2];
      const int32_t vi2x3 = (int32_t) i2[3];
      i2 += 4;
      const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[8];
      const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[9];
      const int32_t vk2x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[10];
      const int32_t vk2x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[11];
      vacc0 += vi2x0 * vk2x0;
      vacc1 += vi2x1 * vk2x1;
      vacc2 += vi2x2 * vk2x2;
      vacc3 += vi2x3 * vk2x3;

      // Tap 3: kernel bytes [12..15].
      const int32_t vi3x0 = (int32_t) i3[0];
      const int32_t vi3x1 = (int32_t) i3[1];
      const int32_t vi3x2 = (int32_t) i3[2];
      const int32_t vi3x3 = (int32_t) i3[3];
      i3 += 4;
      const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[12];
      const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[13];
      const int32_t vk3x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[14];
      const int32_t vk3x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[15];
      vacc0 += vi3x0 * vk3x0;
      vacc1 += vi3x1 * vk3x1;
      vacc2 += vi3x2 * vk3x2;
      vacc3 += vi3x3 * vk3x3;

      // Tap 4: kernel bytes [16..19].
      const int32_t vi4x0 = (int32_t) i4[0];
      const int32_t vi4x1 = (int32_t) i4[1];
      const int32_t vi4x2 = (int32_t) i4[2];
      const int32_t vi4x3 = (int32_t) i4[3];
      i4 += 4;
      const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[16];
      const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[17];
      const int32_t vk4x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[18];
      const int32_t vk4x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[19];
      vacc0 += vi4x0 * vk4x0;
      vacc1 += vi4x1 * vk4x1;
      vacc2 += vi4x2 * vk4x2;
      vacc3 += vi4x3 * vk4x3;

      // Tap 5: kernel bytes [20..23].
      const int32_t vi5x0 = (int32_t) i5[0];
      const int32_t vi5x1 = (int32_t) i5[1];
      const int32_t vi5x2 = (int32_t) i5[2];
      const int32_t vi5x3 = (int32_t) i5[3];
      i5 += 4;
      const int32_t vk5x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[20];
      const int32_t vk5x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[21];
      const int32_t vk5x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[22];
      const int32_t vk5x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[23];
      vacc0 += vi5x0 * vk5x0;
      vacc1 += vi5x1 * vk5x1;
      vacc2 += vi5x2 * vk5x2;
      vacc3 += vi5x3 * vk5x3;

      // Tap 6: kernel bytes [24..27].
      const int32_t vi6x0 = (int32_t) i6[0];
      const int32_t vi6x1 = (int32_t) i6[1];
      const int32_t vi6x2 = (int32_t) i6[2];
      const int32_t vi6x3 = (int32_t) i6[3];
      i6 += 4;
      const int32_t vk6x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[24];
      const int32_t vk6x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[25];
      const int32_t vk6x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[26];
      const int32_t vk6x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[27];
      vacc0 += vi6x0 * vk6x0;
      vacc1 += vi6x1 * vk6x1;
      vacc2 += vi6x2 * vk6x2;
      vacc3 += vi6x3 * vk6x3;

      // Tap 7: kernel bytes [28..31].
      const int32_t vi7x0 = (int32_t) i7[0];
      const int32_t vi7x1 = (int32_t) i7[1];
      const int32_t vi7x2 = (int32_t) i7[2];
      const int32_t vi7x3 = (int32_t) i7[3];
      i7 += 4;
      const int32_t vk7x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[28];
      const int32_t vk7x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[29];
      const int32_t vk7x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[30];
      const int32_t vk7x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[31];
      vacc0 += vi7x0 * vk7x0;
      vacc1 += vi7x1 * vk7x1;
      vacc2 += vi7x2 * vk7x2;
      vacc3 += vi7x3 * vk7x3;

      // Tap 8: kernel bytes [32..35].
      const int32_t vi8x0 = (int32_t) i8[0];
      const int32_t vi8x1 = (int32_t) i8[1];
      const int32_t vi8x2 = (int32_t) i8[2];
      const int32_t vi8x3 = (int32_t) i8[3];
      i8 += 4;
      const int32_t vk8x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[32];
      const int32_t vk8x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[33];
      const int32_t vk8x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[34];
      const int32_t vk8x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[35];
      vacc0 += vi8x0 * vk8x0;
      vacc1 += vi8x1 * vk8x1;
      vacc2 += vi8x2 * vk8x2;
      vacc3 += vi8x3 * vk8x3;

      // Advance past this group's 4 biases + 36 kernel bytes.
      w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t) + 36 * sizeof(int8_t));

      // Requantize: int32 -> float, apply the combined scale, then clamp.
      float vfpacc0 = (float) vacc0;
      float vfpacc1 = (float) vacc1;
      float vfpacc2 = (float) vacc2;
      float vfpacc3 = (float) vacc3;

      vfpacc0 *= vscale;
      vfpacc1 *= vscale;
      vfpacc2 *= vscale;
      vfpacc3 *= vscale;

      vfpacc0 = __builtin_wasm_max_f32(vfpacc0, voutput_min_less_zero_point);
      vfpacc1 = __builtin_wasm_max_f32(vfpacc1, voutput_min_less_zero_point);
      vfpacc2 = __builtin_wasm_max_f32(vfpacc2, voutput_min_less_zero_point);
      vfpacc3 = __builtin_wasm_max_f32(vfpacc3, voutput_min_less_zero_point);

      vfpacc0 = __builtin_wasm_min_f32(vfpacc0, voutput_max_less_zero_point);
      vfpacc1 = __builtin_wasm_min_f32(vfpacc1, voutput_max_less_zero_point);
      vfpacc2 = __builtin_wasm_min_f32(vfpacc2, voutput_max_less_zero_point);
      vfpacc3 = __builtin_wasm_min_f32(vfpacc3, voutput_max_less_zero_point);

      // Magic-bias rounding: adding vmagic_bias leaves the rounded integer in
      // the low bits of the float representation; reinterpreting the bits and
      // subtracting (magic_bias - output_zero_point) recovers the quantized
      // value with the zero point already applied.
      vfpacc0 += vmagic_bias;
      vfpacc1 += vmagic_bias;
      vfpacc2 += vmagic_bias;
      vfpacc3 += vmagic_bias;

      int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
      int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
      int32_t vout2 = (int32_t) float_as_uint32(vfpacc2) - vmagic_bias_less_output_zero_point;
      int32_t vout3 = (int32_t) float_as_uint32(vfpacc3) - vmagic_bias_less_output_zero_point;

      output[0] = (int8_t) vout0;
      output[1] = (int8_t) vout1;
      output[2] = (int8_t) vout2;
      output[3] = (int8_t) vout3;
      output += 4;
    }
    // Remainder loop: last 1-3 channels, one at a time. Biases are read from
    // consecutive int32s at w; kernel taps for the current channel are at
    // k[tap * 4], with k advanced one byte per channel.
    if XNN_UNLIKELY(c != 0) {
      const int8_t* k = (const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t));
      do {
        int32_t vacc = *((const int32_t*) w);
        w = (const void*) ((uintptr_t) w + sizeof(int32_t));

        const int32_t vi0 = (int32_t) *i0++;
        const int32_t vk0 = (int32_t) k[0];
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) *i1++;
        const int32_t vk1 = (int32_t) k[4];
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) *i2++;
        const int32_t vk2 = (int32_t) k[8];
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) *i3++;
        const int32_t vk3 = (int32_t) k[12];
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) *i4++;
        const int32_t vk4 = (int32_t) k[16];
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) *i5++;
        const int32_t vk5 = (int32_t) k[20];
        vacc += vi5 * vk5;
        const int32_t vi6 = (int32_t) *i6++;
        const int32_t vk6 = (int32_t) k[24];
        vacc += vi6 * vk6;
        const int32_t vi7 = (int32_t) *i7++;
        const int32_t vk7 = (int32_t) k[28];
        vacc += vi7 * vk7;
        const int32_t vi8 = (int32_t) *i8++;
        const int32_t vk8 = (int32_t) k[32];
        vacc += vi8 * vk8;
        k += 1;

        // Same scale / clamp / magic-bias requantization as the main loop.
        float vfpacc = (float) vacc * vscale;
        vfpacc = __builtin_wasm_max_f32(vfpacc, voutput_min_less_zero_point);
        vfpacc = __builtin_wasm_min_f32(vfpacc, voutput_max_less_zero_point);
        vfpacc += vmagic_bias;
        int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
        *output++ = (int8_t) vout;
      } while (--c != 0);
    }

    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 12,853
| 38.67284
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-9p4c-minmax-rndnu-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
// QS8 depthwise-convolution microkernel: single pass over a 9-tap kernel,
// 4 channels per main-loop iteration, portable scalar code with "rndnu"
// fixed-point requantization: each int32 accumulator is widened via a
// 32x32->64-bit multiply, a rounding constant is added, and the result is
// arithmetically shifted right before clamping and adding the zero point.
//
// Weight layout per group of 4 channels (as read below): 4 int32 biases
// followed by 36 int8 kernel taps stored tap-major (tap t, channel c at
// byte index t*4 + c). Parameter meanings match the fmagic variant of this
// kernel family; only the requantization path differs.
void xnn_qs8_dwconv_minmax_rndnu_ukernel_9p4c__scalar(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);

  // Fixed-point requantization constants (rndnu scalar variant). The min/max
  // bounds are pre-offset by -output_zero_point; the zero point is added last.
  const int32_t vmultiplier = params->rndnu_scalar.multiplier;
  const int64_t vrounding = params->rndnu_scalar.rounding;
  const uint32_t vshift = params->rndnu_scalar.shift;
  const int32_t voutput_min_less_zero_point = params->rndnu_scalar.output_min_less_zero_point;
  const int32_t voutput_max_less_zero_point = params->rndnu_scalar.output_max_less_zero_point;
  const int32_t voutput_zero_point = params->rndnu_scalar.output_zero_point;
  do {
    // Fetch the 9 input-row pointers for this output pixel. A pointer equal
    // to the shared `zero` buffer is a padding row and is not offset.
    const int8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
    }
    const int8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
    }
    const int8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
    }
    const int8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
    }
    const int8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
    }
    const int8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
    }
    const int8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
    }
    const int8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
    }
    const int8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
    }
    input = (const int8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
    // Main loop: process 4 channels per iteration.
    for (; c >= 4; c -= 4) {
      // Start accumulators from the 4 per-channel int32 biases.
      int32_t vacc0 = ((const int32_t*) w)[0];
      int32_t vacc1 = ((const int32_t*) w)[1];
      int32_t vacc2 = ((const int32_t*) w)[2];
      int32_t vacc3 = ((const int32_t*) w)[3];

      // Tap 0: kernel bytes [0..3] follow the 4 biases.
      const int32_t vi0x0 = (int32_t) i0[0];
      const int32_t vi0x1 = (int32_t) i0[1];
      const int32_t vi0x2 = (int32_t) i0[2];
      const int32_t vi0x3 = (int32_t) i0[3];
      i0 += 4;
      const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[0];
      const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[1];
      const int32_t vk0x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[2];
      const int32_t vk0x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[3];
      vacc0 += vi0x0 * vk0x0;
      vacc1 += vi0x1 * vk0x1;
      vacc2 += vi0x2 * vk0x2;
      vacc3 += vi0x3 * vk0x3;

      // Tap 1: kernel bytes [4..7].
      const int32_t vi1x0 = (int32_t) i1[0];
      const int32_t vi1x1 = (int32_t) i1[1];
      const int32_t vi1x2 = (int32_t) i1[2];
      const int32_t vi1x3 = (int32_t) i1[3];
      i1 += 4;
      const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[4];
      const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[5];
      const int32_t vk1x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[6];
      const int32_t vk1x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[7];
      vacc0 += vi1x0 * vk1x0;
      vacc1 += vi1x1 * vk1x1;
      vacc2 += vi1x2 * vk1x2;
      vacc3 += vi1x3 * vk1x3;

      // Tap 2: kernel bytes [8..11].
      const int32_t vi2x0 = (int32_t) i2[0];
      const int32_t vi2x1 = (int32_t) i2[1];
      const int32_t vi2x2 = (int32_t) i2[2];
      const int32_t vi2x3 = (int32_t) i2[3];
      i2 += 4;
      const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[8];
      const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[9];
      const int32_t vk2x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[10];
      const int32_t vk2x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[11];
      vacc0 += vi2x0 * vk2x0;
      vacc1 += vi2x1 * vk2x1;
      vacc2 += vi2x2 * vk2x2;
      vacc3 += vi2x3 * vk2x3;

      // Tap 3: kernel bytes [12..15].
      const int32_t vi3x0 = (int32_t) i3[0];
      const int32_t vi3x1 = (int32_t) i3[1];
      const int32_t vi3x2 = (int32_t) i3[2];
      const int32_t vi3x3 = (int32_t) i3[3];
      i3 += 4;
      const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[12];
      const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[13];
      const int32_t vk3x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[14];
      const int32_t vk3x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[15];
      vacc0 += vi3x0 * vk3x0;
      vacc1 += vi3x1 * vk3x1;
      vacc2 += vi3x2 * vk3x2;
      vacc3 += vi3x3 * vk3x3;

      // Tap 4: kernel bytes [16..19].
      const int32_t vi4x0 = (int32_t) i4[0];
      const int32_t vi4x1 = (int32_t) i4[1];
      const int32_t vi4x2 = (int32_t) i4[2];
      const int32_t vi4x3 = (int32_t) i4[3];
      i4 += 4;
      const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[16];
      const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[17];
      const int32_t vk4x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[18];
      const int32_t vk4x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[19];
      vacc0 += vi4x0 * vk4x0;
      vacc1 += vi4x1 * vk4x1;
      vacc2 += vi4x2 * vk4x2;
      vacc3 += vi4x3 * vk4x3;

      // Tap 5: kernel bytes [20..23].
      const int32_t vi5x0 = (int32_t) i5[0];
      const int32_t vi5x1 = (int32_t) i5[1];
      const int32_t vi5x2 = (int32_t) i5[2];
      const int32_t vi5x3 = (int32_t) i5[3];
      i5 += 4;
      const int32_t vk5x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[20];
      const int32_t vk5x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[21];
      const int32_t vk5x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[22];
      const int32_t vk5x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[23];
      vacc0 += vi5x0 * vk5x0;
      vacc1 += vi5x1 * vk5x1;
      vacc2 += vi5x2 * vk5x2;
      vacc3 += vi5x3 * vk5x3;

      // Tap 6: kernel bytes [24..27].
      const int32_t vi6x0 = (int32_t) i6[0];
      const int32_t vi6x1 = (int32_t) i6[1];
      const int32_t vi6x2 = (int32_t) i6[2];
      const int32_t vi6x3 = (int32_t) i6[3];
      i6 += 4;
      const int32_t vk6x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[24];
      const int32_t vk6x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[25];
      const int32_t vk6x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[26];
      const int32_t vk6x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[27];
      vacc0 += vi6x0 * vk6x0;
      vacc1 += vi6x1 * vk6x1;
      vacc2 += vi6x2 * vk6x2;
      vacc3 += vi6x3 * vk6x3;

      // Tap 7: kernel bytes [28..31].
      const int32_t vi7x0 = (int32_t) i7[0];
      const int32_t vi7x1 = (int32_t) i7[1];
      const int32_t vi7x2 = (int32_t) i7[2];
      const int32_t vi7x3 = (int32_t) i7[3];
      i7 += 4;
      const int32_t vk7x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[28];
      const int32_t vk7x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[29];
      const int32_t vk7x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[30];
      const int32_t vk7x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[31];
      vacc0 += vi7x0 * vk7x0;
      vacc1 += vi7x1 * vk7x1;
      vacc2 += vi7x2 * vk7x2;
      vacc3 += vi7x3 * vk7x3;

      // Tap 8: kernel bytes [32..35].
      const int32_t vi8x0 = (int32_t) i8[0];
      const int32_t vi8x1 = (int32_t) i8[1];
      const int32_t vi8x2 = (int32_t) i8[2];
      const int32_t vi8x3 = (int32_t) i8[3];
      i8 += 4;
      const int32_t vk8x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[32];
      const int32_t vk8x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[33];
      const int32_t vk8x2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[34];
      const int32_t vk8x3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[35];
      vacc0 += vi8x0 * vk8x0;
      vacc1 += vi8x1 * vk8x1;
      vacc2 += vi8x2 * vk8x2;
      vacc3 += vi8x3 * vk8x3;

      // Advance past this group's 4 biases + 36 kernel bytes.
      w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t) + 36 * sizeof(int8_t));

      // Requantize: widening 32x32->64 multiply, add the rounding term, then
      // arithmetic right shift back to the output scale.
      const int64_t vextacc0 = math_mulext_s32(vacc0, vmultiplier) + vrounding;
      const int64_t vextacc1 = math_mulext_s32(vacc1, vmultiplier) + vrounding;
      const int64_t vextacc2 = math_mulext_s32(vacc2, vmultiplier) + vrounding;
      const int64_t vextacc3 = math_mulext_s32(vacc3, vmultiplier) + vrounding;

      int32_t vout0 = (int32_t) math_asr_s64(vextacc0, vshift);
      int32_t vout1 = (int32_t) math_asr_s64(vextacc1, vshift);
      int32_t vout2 = (int32_t) math_asr_s64(vextacc2, vshift);
      int32_t vout3 = (int32_t) math_asr_s64(vextacc3, vshift);

      // Clamp in the zero-point-free domain, then add the output zero point.
      vout0 = math_max_s32(vout0, voutput_min_less_zero_point);
      vout1 = math_max_s32(vout1, voutput_min_less_zero_point);
      vout2 = math_max_s32(vout2, voutput_min_less_zero_point);
      vout3 = math_max_s32(vout3, voutput_min_less_zero_point);

      vout0 = math_min_s32(vout0, voutput_max_less_zero_point);
      vout1 = math_min_s32(vout1, voutput_max_less_zero_point);
      vout2 = math_min_s32(vout2, voutput_max_less_zero_point);
      vout3 = math_min_s32(vout3, voutput_max_less_zero_point);

      vout0 += voutput_zero_point;
      vout1 += voutput_zero_point;
      vout2 += voutput_zero_point;
      vout3 += voutput_zero_point;

      output[0] = (int8_t) vout0;
      output[1] = (int8_t) vout1;
      output[2] = (int8_t) vout2;
      output[3] = (int8_t) vout3;
      output += 4;
    }
    // Remainder loop: last 1-3 channels, one at a time. Biases are read from
    // consecutive int32s at w; kernel taps for the current channel are at
    // k[tap * 4], with k advanced one byte per channel.
    if XNN_UNLIKELY(c != 0) {
      const int8_t* k = (const int8_t*) ((uintptr_t) w + 4 * sizeof(int32_t));
      do {
        int32_t vacc = *((const int32_t*) w);
        w = (const void*) ((uintptr_t) w + sizeof(int32_t));

        const int32_t vi0 = (int32_t) *i0++;
        const int32_t vk0 = (int32_t) k[0];
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) *i1++;
        const int32_t vk1 = (int32_t) k[4];
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) *i2++;
        const int32_t vk2 = (int32_t) k[8];
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) *i3++;
        const int32_t vk3 = (int32_t) k[12];
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) *i4++;
        const int32_t vk4 = (int32_t) k[16];
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) *i5++;
        const int32_t vk5 = (int32_t) k[20];
        vacc += vi5 * vk5;
        const int32_t vi6 = (int32_t) *i6++;
        const int32_t vk6 = (int32_t) k[24];
        vacc += vi6 * vk6;
        const int32_t vi7 = (int32_t) *i7++;
        const int32_t vk7 = (int32_t) k[28];
        vacc += vi7 * vk7;
        const int32_t vi8 = (int32_t) *i8++;
        const int32_t vk8 = (int32_t) k[32];
        vacc += vi8 * vk8;
        k += 1;

        // Same widen-multiply / round / shift / clamp path as the main loop.
        const int64_t vextacc = math_mulext_s32(vacc, vmultiplier) + vrounding;
        int32_t vout = (int32_t) math_asr_s64(vextacc, vshift);
        vout = math_max_s32(vout, voutput_min_less_zero_point);
        vout = math_min_s32(vout, voutput_max_less_zero_point);
        vout += voutput_zero_point;
        *output++ = (int8_t) vout;
      } while (--c != 0);
    }

    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 12,689
| 38.780564
| 98
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-9p8c-minmax-fp32-avx-mul16-add16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-sse-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_dwconv_minmax_fp32_ukernel_9p8c__avx_mul16_add16(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
const int8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
size_t c = channels;
const void* w = weights;
for (; c >= 8; c -= 8) {
__m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(int8_t)));
const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
i0 += 8;
__m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(int8_t)));
const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
i1 += 8;
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi1x01234567, vxk1x01234567));
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(int8_t)));
const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
i2 += 8;
vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(int8_t)));
const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
i3 += 8;
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi3x01234567, vxk3x01234567));
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(int8_t)));
const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
i4 += 8;
vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(vi5x01234567);
const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(int8_t)));
const __m128i vxk5x01234567 = _mm_cvtepi8_epi16(vk5x01234567);
i5 += 8;
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi5x01234567, vxk5x01234567));
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(vi6x01234567);
const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 48 * sizeof(int8_t)));
const __m128i vxk6x01234567 = _mm_cvtepi8_epi16(vk6x01234567);
i6 += 8;
vprod01234567 = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);
const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
const __m128i vxi7x01234567 = _mm_cvtepi8_epi16(vi7x01234567);
const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 56 * sizeof(int8_t)));
const __m128i vxk7x01234567 = _mm_cvtepi8_epi16(vk7x01234567);
i7 += 8;
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi7x01234567, vxk7x01234567));
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
const __m128i vxi8x01234567 = _mm_cvtepi8_epi16(vi8x01234567);
const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 64 * sizeof(int8_t)));
const __m128i vxk8x01234567 = _mm_cvtepi8_epi16(vk8x01234567);
i8 += 8;
vprod01234567 = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 72 * sizeof(int8_t));
__m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
vscaled4567 = _mm_mul_ps(vscaled4567, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vscaled0123);
vacc4567 = _mm_cvtps_epi32(vscaled4567);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse4.output_min);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
}
if XNN_UNLIKELY(c != 0) {
{
__m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(int8_t)));
const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
__m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(int8_t)));
const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi1x01234567, vxk1x01234567));
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(int8_t)));
const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(int8_t)));
const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi3x01234567, vxk3x01234567));
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(int8_t)));
const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(vi5x01234567);
const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(int8_t)));
const __m128i vxk5x01234567 = _mm_cvtepi8_epi16(vk5x01234567);
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi5x01234567, vxk5x01234567));
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(vi6x01234567);
const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 48 * sizeof(int8_t)));
const __m128i vxk6x01234567 = _mm_cvtepi8_epi16(vk6x01234567);
vprod01234567 = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);
const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
const __m128i vxi7x01234567 = _mm_cvtepi8_epi16(vi7x01234567);
const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 56 * sizeof(int8_t)));
const __m128i vxk7x01234567 = _mm_cvtepi8_epi16(vk7x01234567);
vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi7x01234567, vxk7x01234567));
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
const __m128i vxi8x01234567 = _mm_cvtepi8_epi16(vi8x01234567);
const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 64 * sizeof(int8_t)));
const __m128i vxk8x01234567 = _mm_cvtepi8_epi16(vk8x01234567);
vprod01234567 = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
__m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
vscaled4567 = _mm_mul_ps(vscaled4567, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vscaled0123);
vacc4567 = _mm_cvtps_epi32(vscaled4567);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (c & 4) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (c & 2) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (c & 1) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
output += 1;
}
}
}
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 15,680
| 42.92437
| 131
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-9p8c-minmax-fp32-avx-mul16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-sse-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/unaligned.h>
// QS8 depthwise-convolution microkernel: 9 kernel taps ("9p") applied in
// groups of 8 channels ("8c"), with fp32-based requantization.  The "avx"
// variant is compiled with AVX enabled but uses 128-bit SSE4.1 intrinsics;
// "mul16" means products are formed as 16-bit multiplies and then widened
// into two 4-lane 32-bit accumulators.
//
// channels         - number of channels; inner loop trip count per pixel
// output_width     - number of output pixels to produce
// input            - per-pixel array of 9 input-row pointers; advanced by
//                    input_stride bytes after each pixel
// weights          - packed weights: per 8-channel group, 8 int32 biases
//                    followed by 9 taps x 8 int8 kernel bytes (offsets
//                    8*sizeof(int32_t) + k*8*sizeof(int8_t) below)
// output           - int8 output; advanced by output_increment per pixel
// input_offset     - byte offset added to every input pointer except `zero`
// zero             - shared zero buffer standing in for out-of-bounds rows
// params           - fp32_sse4 requantization constants (scale, clamping,
//                    output zero point)
//
// NOTE(review): tail loads below read a full 8 bytes even when c < 8; the
// XNN_OOB_READS annotation documents that this over-read is intentional.
void xnn_qs8_dwconv_minmax_fp32_ukernel_9p8c__avx_mul16(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);
  do {
    // Load the 9 row pointers for this output pixel.  Pointers equal to the
    // shared `zero` buffer (out-of-bounds taps) are deliberately NOT offset.
    const int8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
    }
    const int8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
    }
    const int8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
    }
    const int8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
    }
    const int8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
    }
    const int8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
    }
    const int8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
    }
    const int8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
    }
    const int8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
    }
    input = (const int8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
    // Main loop: 8 channels per iteration.
    for (; c >= 8; c -= 8) {
      // Two 4-lane int32 accumulators seeded with the packed biases.
      __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
      __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));

      // For each of the 9 taps: sign-extend 8 input bytes and 8 kernel
      // bytes to int16, multiply elementwise, then widen the 16-bit
      // products to int32 (low half via cvtepi16_epi32, high half via
      // unpackhi + arithmetic shift by 16) and accumulate.
      const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
      const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
      const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(int8_t)));
      const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
      i0 += 8;

      __m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

      const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
      const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
      const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(int8_t)));
      const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
      i1 += 8;

      vprod01234567 = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

      const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
      const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
      const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(int8_t)));
      const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
      i2 += 8;

      vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

      const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
      const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
      const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(int8_t)));
      const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
      i3 += 8;

      vprod01234567 = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

      const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
      const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
      const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(int8_t)));
      const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
      i4 += 8;

      vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

      const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
      const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(vi5x01234567);
      const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(int8_t)));
      const __m128i vxk5x01234567 = _mm_cvtepi8_epi16(vk5x01234567);
      i5 += 8;

      vprod01234567 = _mm_mullo_epi16(vxi5x01234567, vxk5x01234567);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

      const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
      const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(vi6x01234567);
      const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 48 * sizeof(int8_t)));
      const __m128i vxk6x01234567 = _mm_cvtepi8_epi16(vk6x01234567);
      i6 += 8;

      vprod01234567 = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

      const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
      const __m128i vxi7x01234567 = _mm_cvtepi8_epi16(vi7x01234567);
      const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 56 * sizeof(int8_t)));
      const __m128i vxk7x01234567 = _mm_cvtepi8_epi16(vk7x01234567);
      i7 += 8;

      vprod01234567 = _mm_mullo_epi16(vxi7x01234567, vxk7x01234567);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

      const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
      const __m128i vxi8x01234567 = _mm_cvtepi8_epi16(vi8x01234567);
      const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 64 * sizeof(int8_t)));
      const __m128i vxk8x01234567 = _mm_cvtepi8_epi16(vk8x01234567);
      i8 += 8;

      vprod01234567 = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

      // Advance past this group's 8 biases + 9x8 kernel bytes.
      w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 72 * sizeof(int8_t));

      // fp32 requantization: scale in float, clamp the upper bound in the
      // float domain (so cvtps rounding cannot exceed it), round-convert
      // back to int32, add the output zero point with saturation, and
      // narrow to int8; the lower bound is applied on the packed bytes.
      __m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
      __m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);

      const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
      vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
      vscaled4567 = _mm_mul_ps(vscaled4567, vscale);

      const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
      vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
      vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);

      vacc0123 = _mm_cvtps_epi32(vscaled0123);
      vacc4567 = _mm_cvtps_epi32(vscaled4567);

      const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
      __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);

      __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);

      const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse4.output_min);
      vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);

      _mm_storel_epi64((__m128i*) output, vout0123456701234567);
      output += 8;
    }
    // Remainder: 1-7 leftover channels.  Same 9-tap accumulation as above
    // (loads still read 8 bytes - see XNN_OOB_READS note), but the input
    // pointers and `w` are not advanced, and the store is split into
    // 4-/2-/1-byte pieces according to the bits of c.
    if XNN_UNLIKELY(c != 0) {
      {
        __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
        __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));

        const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
        const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
        const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(int8_t)));
        const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);

        __m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);

        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

        const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
        const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
        const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(int8_t)));
        const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);

        vprod01234567 = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);

        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

        const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
        const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
        const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(int8_t)));
        const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);

        vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);

        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

        const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
        const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
        const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(int8_t)));
        const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);

        vprod01234567 = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);

        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

        const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
        const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
        const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(int8_t)));
        const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);

        vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);

        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

        const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
        const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(vi5x01234567);
        const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(int8_t)));
        const __m128i vxk5x01234567 = _mm_cvtepi8_epi16(vk5x01234567);

        vprod01234567 = _mm_mullo_epi16(vxi5x01234567, vxk5x01234567);

        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

        const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
        const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(vi6x01234567);
        const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 48 * sizeof(int8_t)));
        const __m128i vxk6x01234567 = _mm_cvtepi8_epi16(vk6x01234567);

        vprod01234567 = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);

        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

        const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
        const __m128i vxi7x01234567 = _mm_cvtepi8_epi16(vi7x01234567);
        const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 56 * sizeof(int8_t)));
        const __m128i vxk7x01234567 = _mm_cvtepi8_epi16(vk7x01234567);

        vprod01234567 = _mm_mullo_epi16(vxi7x01234567, vxk7x01234567);

        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

        const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
        const __m128i vxi8x01234567 = _mm_cvtepi8_epi16(vi8x01234567);
        const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 64 * sizeof(int8_t)));
        const __m128i vxk8x01234567 = _mm_cvtepi8_epi16(vk8x01234567);

        vprod01234567 = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);

        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

        // Requantize exactly as in the main loop.
        __m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
        __m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);

        const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
        vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
        vscaled4567 = _mm_mul_ps(vscaled4567, vscale);

        const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
        vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
        vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);

        vacc0123 = _mm_cvtps_epi32(vscaled0123);
        vacc4567 = _mm_cvtps_epi32(vscaled4567);

        const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
        __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);

        __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
        vout0123456701234567 = _mm_max_epi8(vout0123456701234567, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));

        // Store c (1-7) bytes: emit 4, then 2, then 1, shifting the vector
        // right after each partial store so the next bytes reach lane 0.
        if (c & 4) {
          unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
          vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
          output += 4;
        }
        if (c & 2) {
          unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
          vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
          output += 2;
        }
        if (c & 1) {
          *output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
          output += 1;
        }
      }
    }

    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 16,962
| 44.477212
| 131
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-9p8c-minmax-fp32-neon-mul16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-neon-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
void xnn_qs8_dwconv_minmax_fp32_ukernel_9p8c__neon_mul16(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const float32x4_t vscale = vld1q_dup_f32(¶ms->fp32_neon.scale);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->fp32_neon.magic_bias);
const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(¶ms->fp32_neon.magic_bias_less_output_zero_point);
const int8x8_t voutput_min = vld1_dup_s8(¶ms->fp32_neon.output_min);
const int8x8_t voutput_max = vld1_dup_s8(¶ms->fp32_neon.output_max);
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
const int8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
size_t c = channels;
const void* w = weights;
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5)); i5 += 8;
const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
const int16x8_t vi6x01234567 = vmovl_s8(vld1_s8(i6)); i6 += 8;
const int16x8_t vk6x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
const int16x8_t vi7x01234567 = vmovl_s8(vld1_s8(i7)); i7 += 8;
const int16x8_t vk7x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));
const int16x8_t vi8x01234567 = vmovl_s8(vld1_s8(i8)); i8 += 8;
const int16x8_t vk8x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi8x01234567), vget_low_s16(vk8x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi8x01234567), vget_high_s16(vk8x01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#endif // !XNN_ARCH_ARM64
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
vst1_s8(output, vout01234567); output += 8;
}
if XNN_UNLIKELY(c != 0) {
{
int32x4_t vacc0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0));
const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w));
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1));
const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 8)));
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2));
const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 16)));
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3));
const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 24)));
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4));
const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 32)));
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5));
const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 40)));
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
const int16x8_t vi6x01234567 = vmovl_s8(vld1_s8(i6));
const int16x8_t vk6x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 48)));
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
const int16x8_t vi7x01234567 = vmovl_s8(vld1_s8(i7));
const int16x8_t vk7x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 56)));
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));
const int16x8_t vi8x01234567 = vmovl_s8(vld1_s8(i8));
const int16x8_t vk8x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 64)));
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi8x01234567), vget_low_s16(vk8x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi8x01234567), vget_high_s16(vk8x01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
if (c & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (c & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (c & 1) {
vst1_lane_s8(output, vout01234567, 0); output += 1;
}
}
}
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 12,189
| 44.827068
| 123
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-9p8c-minmax-fp32-neonv8-mul16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-neon-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/intrinsics-polyfill.h>
// QS8 depthwise convolution, unipass, 9 taps (e.g. a 3x3 kernel), 8 channels per
// vector iteration, NEONv8 variant using 16-bit widening multiplies (mul16) and
// fp32 requantization with native round-to-nearest-even (vcvtnq_s32_f32).
//
//   channels         - number of channels (also the per-pixel row length)
//   output_width     - number of output pixels to produce
//   input            - array of 9 row pointers per output pixel, advanced by
//                      input_stride bytes between pixels
//   weights          - packed [8 x int32 bias][9 x 8 x int8 kernel] groups
//   zero             - pointer to a zero vector; rows equal to `zero` are
//                      padding and must NOT be offset by input_offset
//   params           - scalar requantization parameters (fp32_neonv8 variant)
//
// XNN_OOB_READS: the remainder path loads a full 8-byte vector even when
// fewer than 8 channels remain, so small out-of-bounds reads may occur.
void xnn_qs8_dwconv_minmax_fp32_ukernel_9p8c__neonv8_mul16(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);
  // Broadcast requantization constants once; reused for every output pixel.
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
  const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neonv8.output_min);
  const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neonv8.output_max);
  do {
    // Fetch the 9 input row pointers for this output pixel. Rows that are the
    // shared `zero` padding row are left as-is (input_offset must not apply).
    const int8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
    }
    const int8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
    }
    const int8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
    }
    const int8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
    }
    const int8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
    }
    const int8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
    }
    const int8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
    }
    const int8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
    }
    const int8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
    }
    input = (const int8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
    // Main loop: 8 channels per iteration. For each of the 9 taps, widen int8
    // input and kernel bytes to int16, then accumulate into two int32x4 lanes
    // (low/high halves) with vmlal_s16. Bias is the leading 8 x int32 of `w`.
    for (; c >= 8; c -= 8) {
      int32x4_t vacc0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
      int32x4_t vacc4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
      const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
      const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
      const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
      const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
      const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
      const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
      const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
      const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
      const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
      const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
      const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5)); i5 += 8;
      const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
      const int16x8_t vi6x01234567 = vmovl_s8(vld1_s8(i6)); i6 += 8;
      const int16x8_t vk6x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
      const int16x8_t vi7x01234567 = vmovl_s8(vld1_s8(i7)); i7 += 8;
      const int16x8_t vk7x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));
      const int16x8_t vi8x01234567 = vmovl_s8(vld1_s8(i8)); i8 += 8;
      const int16x8_t vk8x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi8x01234567), vget_low_s16(vk8x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi8x01234567), vget_high_s16(vk8x01234567));

      // fp32 requantization: scale in float, then round to nearest-even back
      // to int32 using the ARMv8 FCVTNS instruction (vcvtnq_s32_f32).
      float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
      float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
      vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
      vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
      vacc0123 = vcvtnq_s32_f32(vfpacc0123);
      vacc4567 = vcvtnq_s32_f32(vfpacc4567);

      // Saturating narrow int32 -> int16, add the output zero point, then
      // narrow to int8. AArch64 uses vqmovn_high to fuse the combine.
#if XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
      vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
      int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#else  // !XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
      vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
      int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#endif  // !XNN_ARCH_ARM64
      // Clamp to the [output_min, output_max] activation range and store.
      vout01234567 = vmax_s8(vout01234567, voutput_min);
      vout01234567 = vmin_s8(vout01234567, voutput_max);
      vst1_s8(output, vout01234567); output += 8;
    }
    // Remainder path: 1-7 channels left. Same computation as the main loop
    // (full 8-lane vectors are still loaded - see XNN_OOB_READS), but the
    // store is decomposed into 4/2/1-byte pieces driven by the bits of c.
    if XNN_UNLIKELY(c != 0) {
      {
        int32x4_t vacc0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
        int32x4_t vacc4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
        const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0));
        const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w));
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
        const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1));
        const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 8)));
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
        const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2));
        const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 16)));
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
        const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3));
        const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 24)));
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
        const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4));
        const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 32)));
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
        const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5));
        const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 40)));
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
        const int16x8_t vi6x01234567 = vmovl_s8(vld1_s8(i6));
        const int16x8_t vk6x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 48)));
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
        const int16x8_t vi7x01234567 = vmovl_s8(vld1_s8(i7));
        const int16x8_t vk7x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 56)));
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));
        const int16x8_t vi8x01234567 = vmovl_s8(vld1_s8(i8));
        const int16x8_t vk8x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 64)));
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi8x01234567), vget_low_s16(vk8x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi8x01234567), vget_high_s16(vk8x01234567));

        float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
        float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
        vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
        vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
        vacc0123 = vcvtnq_s32_f32(vfpacc0123);
        vacc4567 = vcvtnq_s32_f32(vfpacc4567);

#if XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
        int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
        vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
        int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
        vout01234567 = vmax_s8(vout01234567, voutput_min);
        vout01234567 = vmin_s8(vout01234567, voutput_max);

        // Store c (1-7) bytes: after each partial store, rotate the vector so
        // the next unstored lane is at position 0.
        if (c & 4) {
          vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
          vout01234567 = vext_s8(vout01234567, vout01234567, 4);
        }
        if (c & 2) {
          vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
          vout01234567 = vext_s8(vout01234567, vout01234567, 2);
        }
        if (c & 1) {
          vst1_lane_s8(output, vout01234567, 0); output += 1;
        }
      }
    }

    // Advance the output pointer past any inter-pixel padding.
    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 11,901
| 44.254753
| 101
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-9p8c-minmax-fp32-sse2-mul16-add16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-sse-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/unaligned.h>
// QS8 depthwise convolution, unipass, 9 taps, 8 channels per vector iteration,
// SSE2 variant using 16-bit multiplies with pairwise int16 pre-accumulation
// (mul16-add16): products of taps (2k, 2k+1) are summed in int16 before being
// sign-extended into the int32 accumulators, halving the number of widening
// steps. The int16 pre-sum cannot overflow because each product of two int8
// values fits in 16 bits with a bit to spare. fp32 requantization uses
// _mm_cvtps_epi32 (round-to-nearest under the default MXCSR rounding mode).
//
//   weights - packed [8 x int32 bias][9 x 8 x int8 kernel] groups
//   zero    - shared zero row for padding; not offset by input_offset
//
// XNN_OOB_READS: the remainder path still performs full 8-byte loads.
void xnn_qs8_dwconv_minmax_fp32_ukernel_9p8c__sse2_mul16_add16(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);
  do {
    // Fetch the 9 input row pointers for this output pixel; padding rows
    // (== zero) are left unchanged.
    const int8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
    }
    const int8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
    }
    const int8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
    }
    const int8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
    }
    const int8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
    }
    const int8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
    }
    const int8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
    }
    const int8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
    }
    const int8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
    }
    input = (const int8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
    // Main loop: 8 channels per iteration. SSE2 has no int8->int16 sign
    // extension, so bytes are sign-extended via unpack-with-self + arithmetic
    // shift right by 8. Taps are processed in pairs: the two int16 products
    // are added, then the pair-sum is widened to int32 by unpacking against
    // its own sign mask (_mm_cmpgt_epi16 against zero).
    for (; c >= 8; c -= 8) {
      __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
      __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));

      const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
      const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(int8_t)));
      i0 += 8;
      const __m128i vxi0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi0x01234567, vi0x01234567), 8);
      const __m128i vxk0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk0x01234567, vk0x01234567), 8);
      __m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);

      const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
      const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(int8_t)));
      i1 += 8;
      const __m128i vxi1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi1x01234567, vi1x01234567), 8);
      const __m128i vxk1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk1x01234567, vk1x01234567), 8);
      vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi1x01234567, vxk1x01234567));

      // Widen the taps-0+1 pair-sum to int32 and accumulate.
      const __m128i vsignprod1x01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vprod01234567);
      vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod01234567, vsignprod1x01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod01234567, vsignprod1x01234567));

      const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
      const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(int8_t)));
      i2 += 8;
      const __m128i vxi2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi2x01234567, vi2x01234567), 8);
      const __m128i vxk2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk2x01234567, vk2x01234567), 8);
      vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);

      const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
      const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(int8_t)));
      i3 += 8;
      const __m128i vxi3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi3x01234567, vi3x01234567), 8);
      const __m128i vxk3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk3x01234567, vk3x01234567), 8);
      vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi3x01234567, vxk3x01234567));

      const __m128i vsignprod3x01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vprod01234567);
      vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod01234567, vsignprod3x01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod01234567, vsignprod3x01234567));

      const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
      const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(int8_t)));
      i4 += 8;
      const __m128i vxi4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi4x01234567, vi4x01234567), 8);
      const __m128i vxk4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk4x01234567, vk4x01234567), 8);
      vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);

      const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
      const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(int8_t)));
      i5 += 8;
      const __m128i vxi5x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi5x01234567, vi5x01234567), 8);
      const __m128i vxk5x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk5x01234567, vk5x01234567), 8);
      vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi5x01234567, vxk5x01234567));

      const __m128i vsignprod5x01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vprod01234567);
      vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod01234567, vsignprod5x01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod01234567, vsignprod5x01234567));

      const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
      const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 48 * sizeof(int8_t)));
      i6 += 8;
      const __m128i vxi6x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi6x01234567, vi6x01234567), 8);
      const __m128i vxk6x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk6x01234567, vk6x01234567), 8);
      vprod01234567 = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);

      const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
      const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 56 * sizeof(int8_t)));
      i7 += 8;
      const __m128i vxi7x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi7x01234567, vi7x01234567), 8);
      const __m128i vxk7x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk7x01234567, vk7x01234567), 8);
      vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi7x01234567, vxk7x01234567));

      const __m128i vsignprod7x01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vprod01234567);
      vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod01234567, vsignprod7x01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod01234567, vsignprod7x01234567));

      // Tap 8 is the odd one out (9 taps): widened and accumulated alone.
      const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
      const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 64 * sizeof(int8_t)));
      i8 += 8;
      const __m128i vxi8x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi8x01234567, vi8x01234567), 8);
      const __m128i vxk8x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk8x01234567, vk8x01234567), 8);
      vprod01234567 = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);

      const __m128i vsignprod8x01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vprod01234567);
      vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod01234567, vsignprod8x01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod01234567, vsignprod8x01234567));

      // Skip past this group's bias (8 x int32) and kernel (9 x 8 x int8).
      w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 72 * sizeof(int8_t));

      // fp32 requantization: convert to float, scale, clamp to the max before
      // converting back (guards _mm_cvtps_epi32 saturation), then add the
      // output zero point with saturating int16 adds and clamp to the min.
      __m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
      __m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);

      const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
      vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
      vscaled4567 = _mm_mul_ps(vscaled4567, vscale);

      const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
      vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
      vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);

      vacc0123 = _mm_cvtps_epi32(vscaled0123);
      vacc4567 = _mm_cvtps_epi32(vscaled4567);

      const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
      __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);

      const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
      vout01234567 = _mm_max_epi16(vout01234567, voutput_min);

      __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);

      _mm_storel_epi64((__m128i*) output, vout0123456701234567);
      output += 8;
    }
    // Remainder path: 1-7 channels left. Identical computation (with full
    // 8-byte loads - see XNN_OOB_READS); the store is split into 4/2/1-byte
    // pieces selected by the bits of c, shifting consumed lanes out.
    if XNN_UNLIKELY(c != 0) {
      {
        __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
        __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));

        const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
        const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(int8_t)));
        const __m128i vxi0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi0x01234567, vi0x01234567), 8);
        const __m128i vxk0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk0x01234567, vk0x01234567), 8);
        __m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);

        const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
        const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(int8_t)));
        const __m128i vxi1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi1x01234567, vi1x01234567), 8);
        const __m128i vxk1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk1x01234567, vk1x01234567), 8);
        vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi1x01234567, vxk1x01234567));

        const __m128i vsignprod1x01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vprod01234567);
        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod01234567, vsignprod1x01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod01234567, vsignprod1x01234567));

        const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
        const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(int8_t)));
        const __m128i vxi2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi2x01234567, vi2x01234567), 8);
        const __m128i vxk2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk2x01234567, vk2x01234567), 8);
        vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);

        const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
        const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(int8_t)));
        const __m128i vxi3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi3x01234567, vi3x01234567), 8);
        const __m128i vxk3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk3x01234567, vk3x01234567), 8);
        vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi3x01234567, vxk3x01234567));

        const __m128i vsignprod3x01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vprod01234567);
        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod01234567, vsignprod3x01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod01234567, vsignprod3x01234567));

        const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
        const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(int8_t)));
        const __m128i vxi4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi4x01234567, vi4x01234567), 8);
        const __m128i vxk4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk4x01234567, vk4x01234567), 8);
        vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);

        const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
        const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(int8_t)));
        const __m128i vxi5x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi5x01234567, vi5x01234567), 8);
        const __m128i vxk5x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk5x01234567, vk5x01234567), 8);
        vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi5x01234567, vxk5x01234567));

        const __m128i vsignprod5x01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vprod01234567);
        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod01234567, vsignprod5x01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod01234567, vsignprod5x01234567));

        const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
        const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 48 * sizeof(int8_t)));
        const __m128i vxi6x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi6x01234567, vi6x01234567), 8);
        const __m128i vxk6x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk6x01234567, vk6x01234567), 8);
        vprod01234567 = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);

        const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
        const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 56 * sizeof(int8_t)));
        const __m128i vxi7x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi7x01234567, vi7x01234567), 8);
        const __m128i vxk7x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk7x01234567, vk7x01234567), 8);
        vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi7x01234567, vxk7x01234567));

        const __m128i vsignprod7x01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vprod01234567);
        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod01234567, vsignprod7x01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod01234567, vsignprod7x01234567));

        const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
        const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 64 * sizeof(int8_t)));
        const __m128i vxi8x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi8x01234567, vi8x01234567), 8);
        const __m128i vxk8x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk8x01234567, vk8x01234567), 8);
        vprod01234567 = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);

        const __m128i vsignprod8x01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vprod01234567);
        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod01234567, vsignprod8x01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod01234567, vsignprod8x01234567));

        // Requantize exactly as in the main loop (w is not advanced here:
        // this is the final, partial group).
        __m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
        __m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);

        const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
        vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
        vscaled4567 = _mm_mul_ps(vscaled4567, vscale);

        const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
        vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
        vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);

        vacc0123 = _mm_cvtps_epi32(vscaled0123);
        vacc4567 = _mm_cvtps_epi32(vscaled4567);

        const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
        __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
        vout01234567 = _mm_max_epi16(vout01234567, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
        __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);

        if (c & 4) {
          unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
          vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
          output += 4;
        }
        if (c & 2) {
          unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
          vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
          output += 2;
        }
        if (c & 1) {
          *output = (int8_t) _mm_cvtsi128_si32(vout0123456701234567);
          output += 1;
        }
      }
    }

    // Advance the output pointer past any inter-pixel padding.
    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 17,867
| 47.686649
| 131
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-9p8c-minmax-fp32-sse2-mul16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-sse-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/unaligned.h>
// QS8 depthwise-convolution microkernel: 9 taps per output pixel ("9p"),
// 8 channels per main-loop iteration ("8c"), unipass, with fp32 requantization
// and min/max output clamping, using the SSE2 16-bit-multiply ("mul16") path.
//
//   channels         - number of channels; must be non-zero.
//   output_width     - number of output pixels to produce; must be non-zero.
//   input            - per-pixel groups of 9 input-row pointers.
//   weights          - packed groups of [8 x int32 bias][9 x 8 x int8 weights].
//   output           - int8 output pointer.
//   input_stride     - byte stride between successive per-pixel pointer groups.
//   output_increment - bytes added to 'output' after each pixel.
//   input_offset     - byte offset applied to every input pointer except 'zero'.
//   zero             - pointer to the shared zero buffer (never offset).
//   params           - requantization constants (fp32_sse2 layout).
//
// XNN_OOB_READS: the remainder path loads full 8-byte vectors even when fewer
// than 8 channels remain, so reads may run past the end of the buffers.
void xnn_qs8_dwconv_minmax_fp32_ukernel_9p8c__sse2_mul16(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);
  do {
    // Set up the 9 input row pointers for this output pixel.  A pointer equal
    // to 'zero' references the shared zero buffer and must not be offset.
    const int8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
    }
    const int8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
    }
    const int8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
    }
    const int8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
    }
    const int8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
    }
    const int8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
    }
    const int8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
    }
    const int8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
    }
    const int8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
    }
    // Advance to the next pixel's pointer group.
    input = (const int8_t**) ((uintptr_t) input + input_stride);
    size_t c = channels;
    const void* w = weights;
    // Main loop: 8 channels per iteration.
    for (; c >= 8; c -= 8) {
      // Initialize the two 4x int32 accumulators from the packed bias.
      __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
      __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));
      // Tap 0.  SSE2 lacks pmovsxbw, so int8 lanes are sign-extended to int16
      // by duplicating each byte (unpacklo with itself) and arithmetic right
      // shift by 8.  The 16x16->32 products are then reassembled from the
      // mullo/mulhi halves via unpacklo/unpackhi and accumulated in 32 bits.
      const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
      const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(int8_t)));
      i0 += 8;
      const __m128i vxi0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi0x01234567, vi0x01234567), 8);
      const __m128i vxk0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk0x01234567, vk0x01234567), 8);
      const __m128i vprod0x01234567lo = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
      const __m128i vprod0x01234567hi = _mm_mulhi_epi16(vxi0x01234567, vxk0x01234567);
      vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod0x01234567lo, vprod0x01234567hi));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod0x01234567lo, vprod0x01234567hi));
      // Tap 1.
      const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
      const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(int8_t)));
      i1 += 8;
      const __m128i vxi1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi1x01234567, vi1x01234567), 8);
      const __m128i vxk1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk1x01234567, vk1x01234567), 8);
      const __m128i vprod1x01234567lo = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
      const __m128i vprod1x01234567hi = _mm_mulhi_epi16(vxi1x01234567, vxk1x01234567);
      vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod1x01234567lo, vprod1x01234567hi));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod1x01234567lo, vprod1x01234567hi));
      // Tap 2.
      const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
      const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(int8_t)));
      i2 += 8;
      const __m128i vxi2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi2x01234567, vi2x01234567), 8);
      const __m128i vxk2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk2x01234567, vk2x01234567), 8);
      const __m128i vprod2x01234567lo = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
      const __m128i vprod2x01234567hi = _mm_mulhi_epi16(vxi2x01234567, vxk2x01234567);
      vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod2x01234567lo, vprod2x01234567hi));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod2x01234567lo, vprod2x01234567hi));
      // Tap 3.
      const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
      const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(int8_t)));
      i3 += 8;
      const __m128i vxi3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi3x01234567, vi3x01234567), 8);
      const __m128i vxk3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk3x01234567, vk3x01234567), 8);
      const __m128i vprod3x01234567lo = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
      const __m128i vprod3x01234567hi = _mm_mulhi_epi16(vxi3x01234567, vxk3x01234567);
      vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod3x01234567lo, vprod3x01234567hi));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod3x01234567lo, vprod3x01234567hi));
      // Tap 4.
      const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
      const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(int8_t)));
      i4 += 8;
      const __m128i vxi4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi4x01234567, vi4x01234567), 8);
      const __m128i vxk4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk4x01234567, vk4x01234567), 8);
      const __m128i vprod4x01234567lo = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
      const __m128i vprod4x01234567hi = _mm_mulhi_epi16(vxi4x01234567, vxk4x01234567);
      vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod4x01234567lo, vprod4x01234567hi));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod4x01234567lo, vprod4x01234567hi));
      // Tap 5.
      const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
      const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(int8_t)));
      i5 += 8;
      const __m128i vxi5x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi5x01234567, vi5x01234567), 8);
      const __m128i vxk5x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk5x01234567, vk5x01234567), 8);
      const __m128i vprod5x01234567lo = _mm_mullo_epi16(vxi5x01234567, vxk5x01234567);
      const __m128i vprod5x01234567hi = _mm_mulhi_epi16(vxi5x01234567, vxk5x01234567);
      vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod5x01234567lo, vprod5x01234567hi));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod5x01234567lo, vprod5x01234567hi));
      // Tap 6.
      const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
      const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 48 * sizeof(int8_t)));
      i6 += 8;
      const __m128i vxi6x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi6x01234567, vi6x01234567), 8);
      const __m128i vxk6x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk6x01234567, vk6x01234567), 8);
      const __m128i vprod6x01234567lo = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);
      const __m128i vprod6x01234567hi = _mm_mulhi_epi16(vxi6x01234567, vxk6x01234567);
      vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod6x01234567lo, vprod6x01234567hi));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod6x01234567lo, vprod6x01234567hi));
      // Tap 7.
      const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
      const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 56 * sizeof(int8_t)));
      i7 += 8;
      const __m128i vxi7x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi7x01234567, vi7x01234567), 8);
      const __m128i vxk7x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk7x01234567, vk7x01234567), 8);
      const __m128i vprod7x01234567lo = _mm_mullo_epi16(vxi7x01234567, vxk7x01234567);
      const __m128i vprod7x01234567hi = _mm_mulhi_epi16(vxi7x01234567, vxk7x01234567);
      vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod7x01234567lo, vprod7x01234567hi));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod7x01234567lo, vprod7x01234567hi));
      // Tap 8.
      const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
      const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 64 * sizeof(int8_t)));
      i8 += 8;
      const __m128i vxi8x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi8x01234567, vi8x01234567), 8);
      const __m128i vxk8x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk8x01234567, vk8x01234567), 8);
      const __m128i vprod8x01234567lo = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);
      const __m128i vprod8x01234567hi = _mm_mulhi_epi16(vxi8x01234567, vxk8x01234567);
      vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod8x01234567lo, vprod8x01234567hi));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod8x01234567lo, vprod8x01234567hi));
      // Advance past this group's bias (8 x int32) and weights (9 x 8 x int8).
      w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 72 * sizeof(int8_t));
      // Requantize: int32 -> fp32, multiply by per-tensor/channel scale, clamp
      // to the max (the min is applied after packing, below), round back to
      // int32 (cvtps rounds to nearest even), then add the output zero point
      // with 16-bit saturation and pack to int8 with saturation.
      __m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
      __m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);
      const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
      vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
      vscaled4567 = _mm_mul_ps(vscaled4567, vscale);
      const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
      vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
      vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);
      vacc0123 = _mm_cvtps_epi32(vscaled0123);
      vacc4567 = _mm_cvtps_epi32(vscaled4567);
      const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
      __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
      const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
      vout01234567 = _mm_max_epi16(vout01234567, voutput_min);
      __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
      // Store all 8 output channels.
      _mm_storel_epi64((__m128i*) output, vout0123456701234567);
      output += 8;
    }
    // Remainder: 1-7 channels left.  Same computation as the main loop, but
    // input pointers are not advanced and only 'c' bytes are stored (loads are
    // still full-width, hence XNN_OOB_READS on the function).
    if XNN_UNLIKELY(c != 0) {
      {
        __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
        __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));
        const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
        const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(int8_t)));
        const __m128i vxi0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi0x01234567, vi0x01234567), 8);
        const __m128i vxk0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk0x01234567, vk0x01234567), 8);
        const __m128i vprod0x01234567lo = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
        const __m128i vprod0x01234567hi = _mm_mulhi_epi16(vxi0x01234567, vxk0x01234567);
        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod0x01234567lo, vprod0x01234567hi));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod0x01234567lo, vprod0x01234567hi));
        const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
        const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(int8_t)));
        const __m128i vxi1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi1x01234567, vi1x01234567), 8);
        const __m128i vxk1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk1x01234567, vk1x01234567), 8);
        const __m128i vprod1x01234567lo = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
        const __m128i vprod1x01234567hi = _mm_mulhi_epi16(vxi1x01234567, vxk1x01234567);
        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod1x01234567lo, vprod1x01234567hi));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod1x01234567lo, vprod1x01234567hi));
        const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
        const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(int8_t)));
        const __m128i vxi2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi2x01234567, vi2x01234567), 8);
        const __m128i vxk2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk2x01234567, vk2x01234567), 8);
        const __m128i vprod2x01234567lo = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
        const __m128i vprod2x01234567hi = _mm_mulhi_epi16(vxi2x01234567, vxk2x01234567);
        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod2x01234567lo, vprod2x01234567hi));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod2x01234567lo, vprod2x01234567hi));
        const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
        const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(int8_t)));
        const __m128i vxi3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi3x01234567, vi3x01234567), 8);
        const __m128i vxk3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk3x01234567, vk3x01234567), 8);
        const __m128i vprod3x01234567lo = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
        const __m128i vprod3x01234567hi = _mm_mulhi_epi16(vxi3x01234567, vxk3x01234567);
        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod3x01234567lo, vprod3x01234567hi));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod3x01234567lo, vprod3x01234567hi));
        const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
        const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(int8_t)));
        const __m128i vxi4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi4x01234567, vi4x01234567), 8);
        const __m128i vxk4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk4x01234567, vk4x01234567), 8);
        const __m128i vprod4x01234567lo = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
        const __m128i vprod4x01234567hi = _mm_mulhi_epi16(vxi4x01234567, vxk4x01234567);
        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod4x01234567lo, vprod4x01234567hi));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod4x01234567lo, vprod4x01234567hi));
        const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
        const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(int8_t)));
        const __m128i vxi5x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi5x01234567, vi5x01234567), 8);
        const __m128i vxk5x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk5x01234567, vk5x01234567), 8);
        const __m128i vprod5x01234567lo = _mm_mullo_epi16(vxi5x01234567, vxk5x01234567);
        const __m128i vprod5x01234567hi = _mm_mulhi_epi16(vxi5x01234567, vxk5x01234567);
        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod5x01234567lo, vprod5x01234567hi));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod5x01234567lo, vprod5x01234567hi));
        const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
        const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 48 * sizeof(int8_t)));
        const __m128i vxi6x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi6x01234567, vi6x01234567), 8);
        const __m128i vxk6x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk6x01234567, vk6x01234567), 8);
        const __m128i vprod6x01234567lo = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);
        const __m128i vprod6x01234567hi = _mm_mulhi_epi16(vxi6x01234567, vxk6x01234567);
        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod6x01234567lo, vprod6x01234567hi));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod6x01234567lo, vprod6x01234567hi));
        const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
        const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 56 * sizeof(int8_t)));
        const __m128i vxi7x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi7x01234567, vi7x01234567), 8);
        const __m128i vxk7x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk7x01234567, vk7x01234567), 8);
        const __m128i vprod7x01234567lo = _mm_mullo_epi16(vxi7x01234567, vxk7x01234567);
        const __m128i vprod7x01234567hi = _mm_mulhi_epi16(vxi7x01234567, vxk7x01234567);
        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod7x01234567lo, vprod7x01234567hi));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod7x01234567lo, vprod7x01234567hi));
        const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
        const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 64 * sizeof(int8_t)));
        const __m128i vxi8x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi8x01234567, vi8x01234567), 8);
        const __m128i vxk8x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vk8x01234567, vk8x01234567), 8);
        const __m128i vprod8x01234567lo = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);
        const __m128i vprod8x01234567hi = _mm_mulhi_epi16(vxi8x01234567, vxk8x01234567);
        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod8x01234567lo, vprod8x01234567hi));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod8x01234567lo, vprod8x01234567hi));
        __m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
        __m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);
        const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
        vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
        vscaled4567 = _mm_mul_ps(vscaled4567, vscale);
        const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
        vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
        vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);
        vacc0123 = _mm_cvtps_epi32(vscaled0123);
        vacc4567 = _mm_cvtps_epi32(vscaled4567);
        const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
        __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
        vout01234567 = _mm_max_epi16(vout01234567, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
        __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
        // Store the low 'c' bytes (4/2/1 at a time), shifting consumed lanes out.
        if (c & 4) {
          unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
          vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
          output += 4;
        }
        if (c & 2) {
          unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
          vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
          output += 2;
        }
        if (c & 1) {
          *output = (int8_t) _mm_cvtsi128_si32(vout0123456701234567);
          output += 1;
        }
      }
    }
    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 20,209
| 50.68798
| 131
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-9p8c-minmax-fp32-sse41-mul16-add16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-sse-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/unaligned.h>
// QS8 depthwise-convolution microkernel: 9 taps per output pixel ("9p"),
// 8 channels per main-loop iteration ("8c"), unipass, with fp32 requantization
// and min/max clamping, using SSE4.1 16-bit multiplies with pairwise 16-bit
// accumulation ("mul16_add16"): adjacent taps' int16 products are summed in
// int16 before being widened to the int32 accumulators.
// NOTE(review): the 16-bit pairwise sum relies on the product pairs not
// overflowing int16 for valid qs8 inputs/weights — per generator design;
// confirm against the mul16-add16 template's weight-range assumptions.
//
//   channels         - number of channels; must be non-zero.
//   output_width     - number of output pixels to produce; must be non-zero.
//   input            - per-pixel groups of 9 input-row pointers.
//   weights          - packed groups of [8 x int32 bias][9 x 8 x int8 weights].
//   output           - int8 output pointer.
//   input_stride     - byte stride between successive per-pixel pointer groups.
//   output_increment - bytes added to 'output' after each pixel.
//   input_offset     - byte offset applied to every input pointer except 'zero'.
//   zero             - pointer to the shared zero buffer (never offset).
//   params           - requantization constants (fp32_sse4 layout).
//
// XNN_OOB_READS: the remainder path loads full 8-byte vectors even when fewer
// than 8 channels remain, so reads may run past the end of the buffers.
void xnn_qs8_dwconv_minmax_fp32_ukernel_9p8c__sse41_mul16_add16(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);
  do {
    // Set up the 9 input row pointers for this output pixel.  A pointer equal
    // to 'zero' references the shared zero buffer and must not be offset.
    const int8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
    }
    const int8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
    }
    const int8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
    }
    const int8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
    }
    const int8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
    }
    const int8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
    }
    const int8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
    }
    const int8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
    }
    const int8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
    }
    // Advance to the next pixel's pointer group.
    input = (const int8_t**) ((uintptr_t) input + input_stride);
    size_t c = channels;
    const void* w = weights;
    // Main loop: 8 channels per iteration.
    for (; c >= 8; c -= 8) {
      // Initialize the two 4x int32 accumulators from the packed bias.
      __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
      __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));
      // Taps 0+1: sign-extend int8->int16 with pmovsxbw (SSE4.1), multiply in
      // 16 bits, sum the two taps' products in int16, then widen to int32
      // (low half via cvtepi16_epi32, high half via unpackhi + arithmetic
      // shift) and accumulate.
      const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
      const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
      const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(int8_t)));
      const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
      i0 += 8;
      __m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
      const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
      const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
      const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(int8_t)));
      const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
      i1 += 8;
      vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi1x01234567, vxk1x01234567));
      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
      // Taps 2+3.
      const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
      const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
      const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(int8_t)));
      const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
      i2 += 8;
      vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
      const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
      const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
      const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(int8_t)));
      const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
      i3 += 8;
      vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi3x01234567, vxk3x01234567));
      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
      // Taps 4+5.
      const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
      const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
      const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(int8_t)));
      const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
      i4 += 8;
      vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
      const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
      const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(vi5x01234567);
      const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(int8_t)));
      const __m128i vxk5x01234567 = _mm_cvtepi8_epi16(vk5x01234567);
      i5 += 8;
      vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi5x01234567, vxk5x01234567));
      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
      // Taps 6+7.
      const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
      const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(vi6x01234567);
      const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 48 * sizeof(int8_t)));
      const __m128i vxk6x01234567 = _mm_cvtepi8_epi16(vk6x01234567);
      i6 += 8;
      vprod01234567 = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);
      const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
      const __m128i vxi7x01234567 = _mm_cvtepi8_epi16(vi7x01234567);
      const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 56 * sizeof(int8_t)));
      const __m128i vxk7x01234567 = _mm_cvtepi8_epi16(vk7x01234567);
      i7 += 8;
      vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi7x01234567, vxk7x01234567));
      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
      // Tap 8 (odd tap, widened on its own).
      const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
      const __m128i vxi8x01234567 = _mm_cvtepi8_epi16(vi8x01234567);
      const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 64 * sizeof(int8_t)));
      const __m128i vxk8x01234567 = _mm_cvtepi8_epi16(vk8x01234567);
      i8 += 8;
      vprod01234567 = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);
      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
      // Advance past this group's bias (8 x int32) and weights (9 x 8 x int8).
      w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 72 * sizeof(int8_t));
      // Requantize: int32 -> fp32, scale, clamp to max, round back to int32
      // (cvtps rounds to nearest even), add the output zero point with 16-bit
      // saturation, pack to int8, and apply the output min on int8 lanes
      // (pmaxsb is SSE4.1, so the min comes after the final pack here).
      __m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
      __m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);
      const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
      vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
      vscaled4567 = _mm_mul_ps(vscaled4567, vscale);
      const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
      vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
      vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);
      vacc0123 = _mm_cvtps_epi32(vscaled0123);
      vacc4567 = _mm_cvtps_epi32(vscaled4567);
      const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
      __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
      __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
      const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse4.output_min);
      vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
      // Store all 8 output channels.
      _mm_storel_epi64((__m128i*) output, vout0123456701234567);
      output += 8;
    }
    // Remainder: 1-7 channels left.  Same computation as the main loop, but
    // input pointers are not advanced and only 'c' bytes are stored (loads are
    // still full-width, hence XNN_OOB_READS on the function).
    if XNN_UNLIKELY(c != 0) {
      {
        __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
        __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));
        const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
        const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
        const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(int8_t)));
        const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
        __m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
        const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
        const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
        const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(int8_t)));
        const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
        vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi1x01234567, vxk1x01234567));
        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
        const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
        const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
        const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(int8_t)));
        const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
        vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
        const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
        const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
        const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(int8_t)));
        const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
        vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi3x01234567, vxk3x01234567));
        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
        const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
        const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
        const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(int8_t)));
        const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
        vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
        const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
        const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(vi5x01234567);
        const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(int8_t)));
        const __m128i vxk5x01234567 = _mm_cvtepi8_epi16(vk5x01234567);
        vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi5x01234567, vxk5x01234567));
        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
        const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
        const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(vi6x01234567);
        const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 48 * sizeof(int8_t)));
        const __m128i vxk6x01234567 = _mm_cvtepi8_epi16(vk6x01234567);
        vprod01234567 = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);
        const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
        const __m128i vxi7x01234567 = _mm_cvtepi8_epi16(vi7x01234567);
        const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 56 * sizeof(int8_t)));
        const __m128i vxk7x01234567 = _mm_cvtepi8_epi16(vk7x01234567);
        vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi7x01234567, vxk7x01234567));
        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
        const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
        const __m128i vxi8x01234567 = _mm_cvtepi8_epi16(vi8x01234567);
        const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 64 * sizeof(int8_t)));
        const __m128i vxk8x01234567 = _mm_cvtepi8_epi16(vk8x01234567);
        vprod01234567 = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);
        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
        __m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
        __m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);
        const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
        vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
        vscaled4567 = _mm_mul_ps(vscaled4567, vscale);
        const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
        vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
        vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);
        vacc0123 = _mm_cvtps_epi32(vscaled0123);
        vacc4567 = _mm_cvtps_epi32(vscaled4567);
        const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
        __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
        __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
        vout0123456701234567 = _mm_max_epi8(vout0123456701234567, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
        // Store the low 'c' bytes (4/2/1 at a time), shifting consumed lanes out.
        if (c & 4) {
          unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
          vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
          output += 4;
        }
        if (c & 2) {
          unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
          vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
          output += 2;
        }
        if (c & 1) {
          *output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
          output += 1;
        }
      }
    }
    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 15,682
| 42.929972
| 131
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-9p8c-minmax-fp32-sse41-mul16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-sse-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/unaligned.h>
// QS8 depthwise convolution microkernel: 9 taps, unipass ("9p"), 8 channels
// per main-loop iteration ("8c"), SSE4.1 16-bit multiply ("mul16"), with
// float-based requantization ("fp32") and min/max output clamping.
//
// Inputs/filter taps are sign-extended int8 -> int16, multiplied in 16 bits,
// then widened into two 32-bit accumulator vectors (4 lanes each).
//
// channels         - number of channels (must be non-zero)
// output_width     - number of output pixels to produce (must be non-zero)
// input            - per-pixel array of 9 input-row pointers
// weights          - packed groups of [8 x int32 bias][9 x 8 x int8 taps]
// output           - destination for quantized int8 outputs
// input_stride     - byte stride between consecutive pixels' pointer arrays
// output_increment - bytes to advance output after each pixel
// input_offset     - byte offset applied to every non-padding input pointer
// zero             - shared zero buffer used for padding rows (never offset)
// params           - requantization constants (fp32_sse4 variant)
void xnn_qs8_dwconv_minmax_fp32_ukernel_9p8c__sse41_mul16(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);
  do {
    // Set up the 9 input-row pointers for this output pixel.  Rows equal to
    // the shared `zero` padding buffer are used as-is (no offset applied).
    const int8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
    }
    const int8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
    }
    const int8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
    }
    const int8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
    }
    const int8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
    }
    const int8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
    }
    const int8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
    }
    const int8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
    }
    const int8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
    }
    input = (const int8_t**) ((uintptr_t) input + input_stride);
    size_t c = channels;
    const void* w = weights;
    // Main loop: 8 channels per iteration.
    for (; c >= 8; c -= 8) {
      // Seed the two 4-lane accumulators with the packed per-channel bias.
      __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
      __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));

      // Tap 0: load 8 int8 inputs and 8 int8 taps, sign-extend to int16,
      // multiply, then widen the 16-bit products into the 32-bit
      // accumulators (low half via cvtepi16_epi32; high half via
      // unpackhi + arithmetic shift, which sign-extends each lane).
      const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
      const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
      const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(int8_t)));
      const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
      i0 += 8;

      __m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

      // Taps 1..8: identical multiply-widen-accumulate per tap; the filter
      // taps are packed contiguously after the bias at 8-byte strides.
      const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
      const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
      const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(int8_t)));
      const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
      i1 += 8;

      vprod01234567 = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

      const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
      const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
      const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(int8_t)));
      const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
      i2 += 8;

      vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

      const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
      const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
      const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(int8_t)));
      const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
      i3 += 8;

      vprod01234567 = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

      const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
      const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
      const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(int8_t)));
      const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
      i4 += 8;

      vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

      const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
      const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(vi5x01234567);
      const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(int8_t)));
      const __m128i vxk5x01234567 = _mm_cvtepi8_epi16(vk5x01234567);
      i5 += 8;

      vprod01234567 = _mm_mullo_epi16(vxi5x01234567, vxk5x01234567);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

      const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
      const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(vi6x01234567);
      const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 48 * sizeof(int8_t)));
      const __m128i vxk6x01234567 = _mm_cvtepi8_epi16(vk6x01234567);
      i6 += 8;

      vprod01234567 = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

      const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
      const __m128i vxi7x01234567 = _mm_cvtepi8_epi16(vi7x01234567);
      const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 56 * sizeof(int8_t)));
      const __m128i vxk7x01234567 = _mm_cvtepi8_epi16(vk7x01234567);
      i7 += 8;

      vprod01234567 = _mm_mullo_epi16(vxi7x01234567, vxk7x01234567);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

      const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
      const __m128i vxi8x01234567 = _mm_cvtepi8_epi16(vi8x01234567);
      const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 64 * sizeof(int8_t)));
      const __m128i vxk8x01234567 = _mm_cvtepi8_epi16(vk8x01234567);
      i8 += 8;

      vprod01234567 = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

      // Advance past this weight group: 8 int32 biases + 9*8 int8 taps.
      w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 72 * sizeof(int8_t));

      // Requantize: int32 -> float, multiply by per-tensor scale, clamp the
      // upper bound in float, round-convert back to int32, add the output
      // zero point with int16 saturation, pack to int8, then apply the
      // lower clamp with a signed-byte max.
      __m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
      __m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);

      const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
      vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
      vscaled4567 = _mm_mul_ps(vscaled4567, vscale);

      const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
      vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
      vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);

      vacc0123 = _mm_cvtps_epi32(vscaled0123);
      vacc4567 = _mm_cvtps_epi32(vscaled4567);

      const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
      __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);

      __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);

      const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse4.output_min);
      vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);

      _mm_storel_epi64((__m128i*) output, vout0123456701234567);
      output += 8;
    }
    // Remainder: 1..7 channels left.  Compute a full 8-lane result (reads
    // may go past the channel count; the kernel is marked XNN_OOB_READS)
    // and store it piecewise in 4-, 2- and 1-byte chunks.
    if XNN_UNLIKELY(c != 0) {
      {
        __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
        __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));

        const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
        const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
        const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(int8_t)));
        const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);

        __m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);

        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

        const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
        const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
        const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(int8_t)));
        const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);

        vprod01234567 = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);

        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

        const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
        const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
        const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(int8_t)));
        const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);

        vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);

        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

        const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
        const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
        const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(int8_t)));
        const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);

        vprod01234567 = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);

        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

        const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
        const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
        const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(int8_t)));
        const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);

        vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);

        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

        const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
        const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(vi5x01234567);
        const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(int8_t)));
        const __m128i vxk5x01234567 = _mm_cvtepi8_epi16(vk5x01234567);

        vprod01234567 = _mm_mullo_epi16(vxi5x01234567, vxk5x01234567);

        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

        const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
        const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(vi6x01234567);
        const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 48 * sizeof(int8_t)));
        const __m128i vxk6x01234567 = _mm_cvtepi8_epi16(vk6x01234567);

        vprod01234567 = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);

        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

        const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
        const __m128i vxi7x01234567 = _mm_cvtepi8_epi16(vi7x01234567);
        const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 56 * sizeof(int8_t)));
        const __m128i vxk7x01234567 = _mm_cvtepi8_epi16(vk7x01234567);

        vprod01234567 = _mm_mullo_epi16(vxi7x01234567, vxk7x01234567);

        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

        const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
        const __m128i vxi8x01234567 = _mm_cvtepi8_epi16(vi8x01234567);
        const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 64 * sizeof(int8_t)));
        const __m128i vxk8x01234567 = _mm_cvtepi8_epi16(vk8x01234567);

        vprod01234567 = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);

        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

        // Same requantization sequence as the main loop.
        __m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
        __m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);

        const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
        vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
        vscaled4567 = _mm_mul_ps(vscaled4567, vscale);

        const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
        vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
        vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);

        vacc0123 = _mm_cvtps_epi32(vscaled0123);
        vacc4567 = _mm_cvtps_epi32(vscaled4567);

        const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
        __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);

        __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);

        vout0123456701234567 = _mm_max_epi8(vout0123456701234567, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));

        // Store the low c bytes: 4-, 2-, then 1-byte pieces, shifting the
        // remaining bytes down after each partial store.
        if (c & 4) {
          unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
          vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
          output += 4;
        }
        if (c & 2) {
          unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
          vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
          output += 2;
        }
        if (c & 1) {
          *output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
          output += 1;
        }
      }
    }

    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 16,964
| 44.482574
| 131
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-9p8c-minmax-fp32-xop-mul16-add16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-sse-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/dwconv.h>
#include <xnnpack/unaligned.h>
// QS8 depthwise convolution microkernel: 9 taps, unipass ("9p"), 8 channels
// per main-loop iteration ("8c"), AMD XOP variant ("mul16_add16"): taps are
// processed in pairs — the first tap of a pair is multiplied with
// _mm_mullo_epi16 and the second fused in with _mm_macc_epi16
// (16-bit multiply-accumulate) — halving the number of widen-to-32-bit
// steps relative to the plain SSE4.1 mul16 kernel.  Results are
// requantized through float ("fp32") with min/max clamping.
//
// channels         - number of channels (must be non-zero)
// output_width     - number of output pixels to produce (must be non-zero)
// input            - per-pixel array of 9 input-row pointers
// weights          - packed groups of [8 x int32 bias][9 x 8 x int8 taps]
// output           - destination for quantized int8 outputs
// input_stride     - byte stride between consecutive pixels' pointer arrays
// output_increment - bytes to advance output after each pixel
// input_offset     - byte offset applied to every non-padding input pointer
// zero             - shared zero buffer used for padding rows (never offset)
// params           - requantization constants (fp32_sse4 variant)
void xnn_qs8_dwconv_minmax_fp32_ukernel_9p8c__xop_mul16_add16(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);
  do {
    // Set up the 9 input-row pointers for this output pixel.  Rows equal to
    // the shared `zero` padding buffer are used as-is (no offset applied).
    const int8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
    }
    const int8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
    }
    const int8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
    }
    const int8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
    }
    const int8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
    }
    const int8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
    }
    const int8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
    }
    const int8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
    }
    const int8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
    }
    input = (const int8_t**) ((uintptr_t) input + input_stride);
    size_t c = channels;
    const void* w = weights;
    // Main loop: 8 channels per iteration.
    for (; c >= 8; c -= 8) {
      // Seed the two 4-lane accumulators with the packed per-channel bias.
      __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
      __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));

      // Taps 0+1: tap 0 multiplies into vprod, tap 1 is fused via XOP
      // 16-bit multiply-accumulate, then the paired sum is widened into
      // the 32-bit accumulators (low half via cvtepi16_epi32, high half
      // via unpackhi + arithmetic shift).  NOTE(review): pairing two
      // int8*int8 products per int16 lane cannot overflow int16.
      const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
      const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
      const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(int8_t)));
      const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
      i0 += 8;

      __m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);

      const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
      const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
      const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(int8_t)));
      const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
      i1 += 8;

      vprod01234567 = _mm_macc_epi16(vxi1x01234567, vxk1x01234567, vprod01234567);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

      // Taps 2+3 (same mul + macc pairing).
      const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
      const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
      const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(int8_t)));
      const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
      i2 += 8;

      vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);

      const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
      const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
      const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(int8_t)));
      const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
      i3 += 8;

      vprod01234567 = _mm_macc_epi16(vxi3x01234567, vxk3x01234567, vprod01234567);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

      // Taps 4+5.
      const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
      const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
      const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(int8_t)));
      const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
      i4 += 8;

      vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);

      const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
      const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(vi5x01234567);
      const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(int8_t)));
      const __m128i vxk5x01234567 = _mm_cvtepi8_epi16(vk5x01234567);
      i5 += 8;

      vprod01234567 = _mm_macc_epi16(vxi5x01234567, vxk5x01234567, vprod01234567);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

      // Taps 6+7.
      const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
      const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(vi6x01234567);
      const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 48 * sizeof(int8_t)));
      const __m128i vxk6x01234567 = _mm_cvtepi8_epi16(vk6x01234567);
      i6 += 8;

      vprod01234567 = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);

      const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
      const __m128i vxi7x01234567 = _mm_cvtepi8_epi16(vi7x01234567);
      const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 56 * sizeof(int8_t)));
      const __m128i vxk7x01234567 = _mm_cvtepi8_epi16(vk7x01234567);
      i7 += 8;

      vprod01234567 = _mm_macc_epi16(vxi7x01234567, vxk7x01234567, vprod01234567);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

      // Tap 8: odd tap count, so the last tap is handled alone.
      const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
      const __m128i vxi8x01234567 = _mm_cvtepi8_epi16(vi8x01234567);
      const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 64 * sizeof(int8_t)));
      const __m128i vxk8x01234567 = _mm_cvtepi8_epi16(vk8x01234567);
      i8 += 8;

      vprod01234567 = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

      // Advance past this weight group: 8 int32 biases + 9*8 int8 taps.
      w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 72 * sizeof(int8_t));

      // Requantize: int32 -> float, multiply by per-tensor scale, clamp the
      // upper bound in float, round-convert back to int32, add the output
      // zero point with int16 saturation, pack to int8, then apply the
      // lower clamp with a signed-byte max.
      __m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
      __m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);

      const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
      vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
      vscaled4567 = _mm_mul_ps(vscaled4567, vscale);

      const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
      vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
      vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);

      vacc0123 = _mm_cvtps_epi32(vscaled0123);
      vacc4567 = _mm_cvtps_epi32(vscaled4567);

      const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
      __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);

      __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);

      const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse4.output_min);
      vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);

      _mm_storel_epi64((__m128i*) output, vout0123456701234567);
      output += 8;
    }
    // Remainder: 1..7 channels left.  Compute a full 8-lane result (reads
    // may go past the channel count; the kernel is marked XNN_OOB_READS)
    // and store it piecewise in 4-, 2- and 1-byte chunks.
    if XNN_UNLIKELY(c != 0) {
      {
        __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
        __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));

        const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
        const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
        const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(int8_t)));
        const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);

        __m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);

        const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
        const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
        const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(int8_t)));
        const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);

        vprod01234567 = _mm_macc_epi16(vxi1x01234567, vxk1x01234567, vprod01234567);

        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

        const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
        const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
        const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(int8_t)));
        const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);

        vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);

        const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
        const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
        const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(int8_t)));
        const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);

        vprod01234567 = _mm_macc_epi16(vxi3x01234567, vxk3x01234567, vprod01234567);

        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

        const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
        const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
        const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(int8_t)));
        const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);

        vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);

        const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
        const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(vi5x01234567);
        const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(int8_t)));
        const __m128i vxk5x01234567 = _mm_cvtepi8_epi16(vk5x01234567);

        vprod01234567 = _mm_macc_epi16(vxi5x01234567, vxk5x01234567, vprod01234567);

        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

        const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
        const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(vi6x01234567);
        const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 48 * sizeof(int8_t)));
        const __m128i vxk6x01234567 = _mm_cvtepi8_epi16(vk6x01234567);

        vprod01234567 = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);

        const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
        const __m128i vxi7x01234567 = _mm_cvtepi8_epi16(vi7x01234567);
        const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 56 * sizeof(int8_t)));
        const __m128i vxk7x01234567 = _mm_cvtepi8_epi16(vk7x01234567);

        vprod01234567 = _mm_macc_epi16(vxi7x01234567, vxk7x01234567, vprod01234567);

        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

        const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
        const __m128i vxi8x01234567 = _mm_cvtepi8_epi16(vi8x01234567);
        const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 64 * sizeof(int8_t)));
        const __m128i vxk8x01234567 = _mm_cvtepi8_epi16(vk8x01234567);

        vprod01234567 = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);

        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

        // Same requantization sequence as the main loop.
        __m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
        __m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);

        const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
        vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
        vscaled4567 = _mm_mul_ps(vscaled4567, vscale);

        const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
        vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
        vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);

        vacc0123 = _mm_cvtps_epi32(vscaled0123);
        vacc4567 = _mm_cvtps_epi32(vscaled4567);

        const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
        __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);

        __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);

        vout0123456701234567 = _mm_max_epi8(vout0123456701234567, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));

        // Store the low c bytes: 4-, 2-, then 1-byte pieces, shifting the
        // remaining bytes down after each partial store.
        if (c & 4) {
          unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
          vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
          output += 4;
        }
        if (c & 2) {
          unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
          vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
          output += 2;
        }
        if (c & 1) {
          *output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
          output += 1;
        }
      }
    }

    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 15,605
| 42.229917
| 131
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-9p8c-minmax-rndnu-neon-mla8-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-neon-mul8.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
void xnn_qs8_dwconv_minmax_rndnu_ukernel_9p8c__neon_mla8_ld64(
size_t channels,
size_t output_width,
const int8_t** input,
const void* weights,
int8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const int8_t* zero,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
const int32x4_t vright_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
const int8x8_t voutput_min = vld1_dup_s8(¶ms->rndnu_neon.output_min);
const int8x8_t voutput_max = vld1_dup_s8(¶ms->rndnu_neon.output_max);
do {
const int8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
}
const int8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
}
const int8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
}
const int8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
}
const int8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
}
const int8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
}
const int8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
}
const int8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
}
const int8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
}
input = (const int8_t**) ((uintptr_t) input + input_stride);
size_t c = channels;
const void* w = weights;
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi5x01234567, vk5x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
const int8x8_t vk6x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi6x01234567, vk6x01234567);
const int8x8_t vi7x01234567 = vld1_s8(i7); i7 += 8;
const int8x8_t vk7x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmlal_s8(vprod01234567, vi7x01234567, vk7x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi8x01234567 = vld1_s8(i8); i8 += 8;
const int8x8_t vk8x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod01234567 = vmull_s8(vi8x01234567, vk8x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc0123 = vqshlq_s32(vacc0123, vright_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vright_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#endif // !XNN_ARCH_ARM64
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
vst1_s8(output, vout01234567); output += 8;
}
if XNN_UNLIKELY(c != 0) {
{
int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
const int8x8_t vi0x01234567 = vld1_s8(i0);
const int8x8_t vk0x01234567 = vld1_s8(w);
int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
const int8x8_t vi1x01234567 = vld1_s8(i1);
const int8x8_t vk1x01234567 = vld1_s8((const void*) ((const int8_t*) w + 8));
vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi2x01234567 = vld1_s8(i2);
const int8x8_t vk2x01234567 = vld1_s8((const void*) ((const int8_t*) w + 16));
vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3);
const int8x8_t vk3x01234567 = vld1_s8((const void*) ((const int8_t*) w + 24));
vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi4x01234567 = vld1_s8(i4);
const int8x8_t vk4x01234567 = vld1_s8((const void*) ((const int8_t*) w + 32));
vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5);
const int8x8_t vk5x01234567 = vld1_s8((const void*) ((const int8_t*) w + 40));
vprod01234567 = vmlal_s8(vprod01234567, vi5x01234567, vk5x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi6x01234567 = vld1_s8(i6);
const int8x8_t vk6x01234567 = vld1_s8((const void*) ((const int8_t*) w + 48));
vprod01234567 = vmull_s8(vi6x01234567, vk6x01234567);
const int8x8_t vi7x01234567 = vld1_s8(i7);
const int8x8_t vk7x01234567 = vld1_s8((const void*) ((const int8_t*) w + 56));
vprod01234567 = vmlal_s8(vprod01234567, vi7x01234567, vk7x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
const int8x8_t vi8x01234567 = vld1_s8(i8);
const int8x8_t vk8x01234567 = vld1_s8((const void*) ((const int8_t*) w + 64));
vprod01234567 = vmull_s8(vi8x01234567, vk8x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
vacc0123 = vqshlq_s32(vacc0123, vright_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vright_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
if (c & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (c & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (c & 1) {
vst1_lane_s8(output, vout01234567, 0); output += 1;
}
}
}
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 10,675
| 37.821818
| 94
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-9p8c-minmax-rndnu-neon-mul16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-neon-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
// QS8 depthwise-convolution microkernel: 9 kernel points ("9p", e.g. a 3x3
// window), 8 channels per vector pass ("8c"), rndnu requantization, NEON
// int16 widening multiply-accumulate ("mul16").
//
// channels         - number of channels (innermost dimension).
// output_width     - number of output pixels to compute.
// input            - per-pixel array of 9 input-row pointers; advanced by
//                    input_stride bytes after each pixel.
// weights          - packed weights: for each 8-channel group, 8 int32
//                    biases followed by 9 groups of 8 int8 kernel values
//                    (layout implied by the load sequence below).
// output_increment - bytes added to `output` after each pixel's channels.
// input_offset     - byte offset applied to every input pointer that is
//                    not the shared `zero` padding buffer.
// params           - requantization constants (rndnu_neon layout).
void xnn_qs8_dwconv_minmax_rndnu_ukernel_9p8c__neon_mul16(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);
  // Broadcast requantization constants and output clamping bounds once.
  const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
  const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
  const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
  const int8x8_t voutput_min = vld1_dup_s8(&params->rndnu_neon.output_min);
  const int8x8_t voutput_max = vld1_dup_s8(&params->rndnu_neon.output_max);
  do {
    // Fetch the 9 input-row pointers for this output pixel.  A pointer equal
    // to `zero` references the shared padding buffer and is used as-is; all
    // other pointers are shifted by input_offset.
    const int8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
    }
    const int8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
    }
    const int8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
    }
    const int8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
    }
    const int8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
    }
    const int8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
    }
    const int8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
    }
    const int8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
    }
    const int8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
    }
    input = (const int8_t**) ((uintptr_t) input + input_stride);
    size_t c = channels;
    const void* w = weights;
    // Main loop: 8 channels per iteration.  Accumulators start from the
    // packed int32 biases; each of the 9 taps widens input and weight bytes
    // to int16 (vmovl_s8) and accumulates with vmlal_s16.
    for (; c >= 8; c -= 8) {
      int32x4_t vacc0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
      int32x4_t vacc4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
      const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
      const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
      const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
      const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
      const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
      const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
      const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
      const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
      const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
      const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
      const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5)); i5 += 8;
      const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
      const int16x8_t vi6x01234567 = vmovl_s8(vld1_s8(i6)); i6 += 8;
      const int16x8_t vk6x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
      const int16x8_t vi7x01234567 = vmovl_s8(vld1_s8(i7)); i7 += 8;
      const int16x8_t vk7x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));
      const int16x8_t vi8x01234567 = vmovl_s8(vld1_s8(i8)); i8 += 8;
      const int16x8_t vk8x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);
      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi8x01234567), vget_low_s16(vk8x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi8x01234567), vget_high_s16(vk8x01234567));
      // rndnu requantization: saturating pre-shift, saturating doubling
      // multiply-high, then rounding post-shift.
      vacc0123 = vqshlq_s32(vacc0123, vright_pre_shift);
      vacc4567 = vqshlq_s32(vacc4567, vright_pre_shift);
      vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
      vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
      vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
      vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);
// Saturating narrow int32->int16, add output zero point, narrow to int8.
#if XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
      vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
      int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#else  // !XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
      vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
      int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#endif  // !XNN_ARCH_ARM64
      vout01234567 = vmax_s8(vout01234567, voutput_min);
      vout01234567 = vmin_s8(vout01234567, voutput_max);
      vst1_s8(output, vout01234567); output += 8;
    }
    // Remainder: compute a full 8-channel group (the XNN_OOB_READS attribute
    // marks the deliberate over-reads) and store only the final 1-7 channels.
    if XNN_UNLIKELY(c != 0) {
      {
        int32x4_t vacc0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
        int32x4_t vacc4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
        const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0));
        const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w));
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
        const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1));
        const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 8)));
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
        const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2));
        const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 16)));
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
        const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3));
        const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 24)));
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
        const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4));
        const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 32)));
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
        const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5));
        const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 40)));
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
        const int16x8_t vi6x01234567 = vmovl_s8(vld1_s8(i6));
        const int16x8_t vk6x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 48)));
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
        const int16x8_t vi7x01234567 = vmovl_s8(vld1_s8(i7));
        const int16x8_t vk7x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 56)));
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));
        const int16x8_t vi8x01234567 = vmovl_s8(vld1_s8(i8));
        const int16x8_t vk8x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 64)));
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi8x01234567), vget_low_s16(vk8x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi8x01234567), vget_high_s16(vk8x01234567));
        vacc0123 = vqshlq_s32(vacc0123, vright_pre_shift);
        vacc4567 = vqshlq_s32(vacc4567, vright_pre_shift);
        vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
        vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
        vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
        vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);
#if XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
        int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
        vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
        int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
        vout01234567 = vmax_s8(vout01234567, voutput_min);
        vout01234567 = vmin_s8(vout01234567, voutput_max);
        // Store 4, then 2, then 1 lane(s) depending on the remainder count,
        // rotating the vector (vext) so the next store sees fresh lanes.
        if (c & 4) {
          vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
          vout01234567 = vext_s8(vout01234567, vout01234567, 4);
        }
        if (c & 2) {
          vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
          vout01234567 = vext_s8(vout01234567, vout01234567, 2);
        }
        if (c & 1) {
          vst1_lane_s8(output, vout01234567, 0); output += 1;
        }
      }
    }
    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 12,115
| 44.893939
| 101
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-dwconv/gen/qs8-dwconv-9p8c-minmax-rndnu-neon-mul8-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-neon-mul8.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
// QS8 depthwise-convolution microkernel: 9 kernel points, 8 channels per
// vector pass, rndnu requantization, NEON int8 multiplies with 64-bit loads
// ("mul8_ld64").  Unlike the mul16 variant, each tap multiplies the raw
// int8 vectors with vmull_s8 and widens the int16 product into the int32
// accumulators with vaddw_s16.
//
// channels         - number of channels (innermost dimension).
// output_width     - number of output pixels to compute.
// input            - per-pixel array of 9 input-row pointers; advanced by
//                    input_stride bytes after each pixel.
// weights          - packed weights: for each 8-channel group, 8 int32
//                    biases followed by 9 groups of 8 int8 kernel values
//                    (layout implied by the load sequence below).
// output_increment - bytes added to `output` after each pixel's channels.
// input_offset     - byte offset applied to every input pointer that is
//                    not the shared `zero` padding buffer.
// params           - requantization constants (rndnu_neon layout).
void xnn_qs8_dwconv_minmax_rndnu_ukernel_9p8c__neon_mul8_ld64(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);
  // Broadcast requantization constants and output clamping bounds once.
  const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
  const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
  const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
  const int8x8_t voutput_min = vld1_dup_s8(&params->rndnu_neon.output_min);
  const int8x8_t voutput_max = vld1_dup_s8(&params->rndnu_neon.output_max);
  do {
    // Fetch the 9 input-row pointers for this output pixel.  A pointer equal
    // to `zero` references the shared padding buffer and is used as-is; all
    // other pointers are shifted by input_offset.
    const int8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
    }
    const int8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
    }
    const int8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
    }
    const int8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
    }
    const int8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
    }
    const int8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
    }
    const int8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
    }
    const int8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
    }
    const int8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
    }
    input = (const int8_t**) ((uintptr_t) input + input_stride);
    size_t c = channels;
    const void* w = weights;
    // Main loop: 8 channels per iteration.  Accumulators start from the
    // packed int32 biases; each tap computes an int16 product (vmull_s8)
    // and widens both halves into the int32 accumulators (vaddw_s16).
    for (; c >= 8; c -= 8) {
      int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
      int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
      const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
      const int8x8_t vk0x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
      int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
      vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
      vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
      const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
      const int8x8_t vk1x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
      vprod01234567 = vmull_s8(vi1x01234567, vk1x01234567);
      vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
      vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
      const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
      const int8x8_t vk2x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
      vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
      vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
      vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
      const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
      const int8x8_t vk3x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
      vprod01234567 = vmull_s8(vi3x01234567, vk3x01234567);
      vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
      vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
      const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
      const int8x8_t vk4x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
      vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
      vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
      vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
      const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
      const int8x8_t vk5x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
      vprod01234567 = vmull_s8(vi5x01234567, vk5x01234567);
      vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
      vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
      const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
      const int8x8_t vk6x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
      vprod01234567 = vmull_s8(vi6x01234567, vk6x01234567);
      vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
      vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
      const int8x8_t vi7x01234567 = vld1_s8(i7); i7 += 8;
      const int8x8_t vk7x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
      vprod01234567 = vmull_s8(vi7x01234567, vk7x01234567);
      vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
      vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
      const int8x8_t vi8x01234567 = vld1_s8(i8); i8 += 8;
      const int8x8_t vk8x01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
      vprod01234567 = vmull_s8(vi8x01234567, vk8x01234567);
      vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
      vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
      // rndnu requantization: saturating pre-shift, saturating doubling
      // multiply-high, then rounding post-shift.
      vacc0123 = vqshlq_s32(vacc0123, vright_pre_shift);
      vacc4567 = vqshlq_s32(vacc4567, vright_pre_shift);
      vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
      vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
      vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
      vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);
// Saturating narrow int32->int16, add output zero point, narrow to int8.
#if XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
      vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
      int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#else  // !XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
      vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
      int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#endif  // !XNN_ARCH_ARM64
      vout01234567 = vmax_s8(vout01234567, voutput_min);
      vout01234567 = vmin_s8(vout01234567, voutput_max);
      vst1_s8(output, vout01234567); output += 8;
    }
    // Remainder: compute a full 8-channel group (the XNN_OOB_READS attribute
    // marks the deliberate over-reads) and store only the final 1-7 channels.
    if XNN_UNLIKELY(c != 0) {
      {
        int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
        int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
        const int8x8_t vi0x01234567 = vld1_s8(i0);
        const int8x8_t vk0x01234567 = vld1_s8(w);
        int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi1x01234567 = vld1_s8(i1);
        const int8x8_t vk1x01234567 = vld1_s8((const void*) ((const int8_t*) w + 8));
        vprod01234567 = vmull_s8(vi1x01234567, vk1x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi2x01234567 = vld1_s8(i2);
        const int8x8_t vk2x01234567 = vld1_s8((const void*) ((const int8_t*) w + 16));
        vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi3x01234567 = vld1_s8(i3);
        const int8x8_t vk3x01234567 = vld1_s8((const void*) ((const int8_t*) w + 24));
        vprod01234567 = vmull_s8(vi3x01234567, vk3x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi4x01234567 = vld1_s8(i4);
        const int8x8_t vk4x01234567 = vld1_s8((const void*) ((const int8_t*) w + 32));
        vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi5x01234567 = vld1_s8(i5);
        const int8x8_t vk5x01234567 = vld1_s8((const void*) ((const int8_t*) w + 40));
        vprod01234567 = vmull_s8(vi5x01234567, vk5x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi6x01234567 = vld1_s8(i6);
        const int8x8_t vk6x01234567 = vld1_s8((const void*) ((const int8_t*) w + 48));
        vprod01234567 = vmull_s8(vi6x01234567, vk6x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi7x01234567 = vld1_s8(i7);
        const int8x8_t vk7x01234567 = vld1_s8((const void*) ((const int8_t*) w + 56));
        vprod01234567 = vmull_s8(vi7x01234567, vk7x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi8x01234567 = vld1_s8(i8);
        const int8x8_t vk8x01234567 = vld1_s8((const void*) ((const int8_t*) w + 64));
        vprod01234567 = vmull_s8(vi8x01234567, vk8x01234567);
        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        vacc0123 = vqshlq_s32(vacc0123, vright_pre_shift);
        vacc4567 = vqshlq_s32(vacc4567, vright_pre_shift);
        vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
        vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
        vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
        vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);
#if XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
        int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
        vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
        int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
        vout01234567 = vmax_s8(vout01234567, voutput_min);
        vout01234567 = vmin_s8(vout01234567, voutput_max);
        // Store 4, then 2, then 1 lane(s) depending on the remainder count,
        // rotating the vector (vext) so the next store sees fresh lanes.
        if (c & 4) {
          vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
          vout01234567 = vext_s8(vout01234567, vout01234567, 4);
        }
        if (c & 2) {
          vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
          vout01234567 = vext_s8(vout01234567, vout01234567, 2);
        }
        if (c & 1) {
          vst1_lane_s8(output, vout01234567, 0); output += 1;
        }
      }
    }
    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 11,651
| 39.041237
| 94
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-f32-vcvt/gen/qs8-f32-vcvt-avx-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/avx.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>
// Convert a contiguous buffer of QS8 (signed 8-bit quantized) values to
// float32 using AVX: out[i] = (in[i] - zero_point) * scale.
// Processes 16 elements per main-loop iteration, then 4 at a time, then a
// 1-3 element tail (the 4-byte tail load may read past the end, which the
// XNN_OOB_READS attribute documents as intentional).
void xnn_qs8_f32_vcvt_ukernel__avx_x16(
    size_t batch,
    const int8_t* input,
    float* output,
    const union xnn_qs8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(int8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Precomputed parameters: the negated zero point (added to recenter the
  // int8 values) and the dequantization scale.
  const __m128i vneg_zero_point = _mm_load_si128((const __m128i*) params->avx.minus_zero_point);
  const __m256 vdequant_scale = _mm256_load_ps(params->avx.scale);
  // Main loop: 16 elements per iteration.
  while (batch >= 16 * sizeof(int8_t)) {
    // Sign-extend four groups of 4 bytes to 32-bit lanes, recentering each
    // group by the zero point as soon as it is loaded.
    const __m128i vgrp0 = _mm_add_epi32(
        _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input))), vneg_zero_point);
    const __m128i vgrp1 = _mm_add_epi32(
        _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 4))), vneg_zero_point);
    const __m128i vgrp2 = _mm_add_epi32(
        _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 8))), vneg_zero_point);
    const __m128i vgrp3 = _mm_add_epi32(
        _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 12))), vneg_zero_point);
    input += 16;
    // Widen the 128-bit halves into 256-bit vectors, convert to float, and
    // apply the scale.
    const __m256i vwide0 = _mm256_insertf128_si256(_mm256_castsi128_si256(vgrp0), vgrp1, 1);
    const __m256i vwide1 = _mm256_insertf128_si256(_mm256_castsi128_si256(vgrp2), vgrp3, 1);
    const __m256 vf0 = _mm256_mul_ps(_mm256_cvtepi32_ps(vwide0), vdequant_scale);
    const __m256 vf1 = _mm256_mul_ps(_mm256_cvtepi32_ps(vwide1), vdequant_scale);
    _mm256_storeu_ps(output, vf0);
    _mm256_storeu_ps(output + 8, vf1);
    output += 16;
    batch -= 16 * sizeof(int8_t);
  }
  // Secondary loop: 4 elements per iteration.
  while (batch >= 4 * sizeof(int8_t)) {
    __m128i vwide = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input)));
    vwide = _mm_add_epi32(vwide, vneg_zero_point);
    input += 4;
    const __m128 vf = _mm_mul_ps(_mm_cvtepi32_ps(vwide), _mm256_castps256_ps128(vdequant_scale));
    _mm_storeu_ps(output, vf);
    output += 4;
    batch -= 4 * sizeof(int8_t);
  }
  // Tail: 1-3 leftover elements, stored as 2 lanes and/or 1 lane.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(int8_t));
    assert(batch <= 3 * sizeof(int8_t));
    __m128i vwide = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input)));
    vwide = _mm_add_epi32(vwide, vneg_zero_point);
    __m128 vf = _mm_mul_ps(_mm_cvtepi32_ps(vwide), _mm256_castps256_ps128(vdequant_scale));
    if (batch & (2 * sizeof(int8_t))) {
      _mm_storel_pi((__m64*) output, vf);
      vf = _mm_movehl_ps(vf, vf);  // shift lanes 2,3 down for the 1-lane store
      output += 2;
    }
    if (batch & (1 * sizeof(int8_t))) {
      _mm_store_ss(output, vf);
    }
  }
}
| 3,029
| 33.431818
| 98
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-f32-vcvt/gen/qs8-f32-vcvt-avx-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/avx.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>
// Converts QS8 (quantized int8) elements to FP32: y = (x + minus_zero_point) * scale.
// AVX variant processing 24 elements per main-loop iteration.
void xnn_qs8_f32_vcvt_ukernel__avx_x24(
    size_t batch,
    const int8_t* input,
    float* output,
    const union xnn_qs8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(int8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Per-op constants: negated zero point (replicated per lane) and dequantization scale.
  const __m128i vminus_zero_point = _mm_load_si128((const __m128i*) params->avx.minus_zero_point);
  const __m256 vscale = _mm256_load_ps(params->avx.scale);
  // Main loop: 24 elements/iteration, loaded as six 4-byte groups sign-extended to int32.
  for (; batch >= 24 * sizeof(int8_t); batch -= 24 * sizeof(int8_t)) {
    __m128i vx0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input)));
    __m128i vx4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 4)));
    __m128i vx89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 8)));
    __m128i vxCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 12)));
    __m128i vxGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 16)));
    __m128i vxKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 20)));
    input += 24;
    vx0123 = _mm_add_epi32(vx0123, vminus_zero_point);
    vx4567 = _mm_add_epi32(vx4567, vminus_zero_point);
    vx89AB = _mm_add_epi32(vx89AB, vminus_zero_point);
    vxCDEF = _mm_add_epi32(vxCDEF, vminus_zero_point);
    vxGHIJ = _mm_add_epi32(vxGHIJ, vminus_zero_point);
    vxKLMN = _mm_add_epi32(vxKLMN, vminus_zero_point);
    // Pair 128-bit halves into 256-bit vectors before the int32->float conversion.
    const __m256i vx01234567 = _mm256_insertf128_si256(_mm256_castsi128_si256(vx0123), vx4567, 1);
    const __m256i vx89ABCDEF = _mm256_insertf128_si256(_mm256_castsi128_si256(vx89AB), vxCDEF, 1);
    const __m256i vxGHIJKLMN = _mm256_insertf128_si256(_mm256_castsi128_si256(vxGHIJ), vxKLMN, 1);
    __m256 vy01234567 = _mm256_cvtepi32_ps(vx01234567);
    __m256 vy89ABCDEF = _mm256_cvtepi32_ps(vx89ABCDEF);
    __m256 vyGHIJKLMN = _mm256_cvtepi32_ps(vxGHIJKLMN);
    vy01234567 = _mm256_mul_ps(vy01234567, vscale);
    vy89ABCDEF = _mm256_mul_ps(vy89ABCDEF, vscale);
    vyGHIJKLMN = _mm256_mul_ps(vyGHIJKLMN, vscale);
    _mm256_storeu_ps(output, vy01234567);
    _mm256_storeu_ps(output + 8, vy89ABCDEF);
    _mm256_storeu_ps(output + 16, vyGHIJKLMN);
    output += 24;
  }
  // Secondary loop: 4 elements/iteration.
  for (; batch >= 4 * sizeof(int8_t); batch -= 4 * sizeof(int8_t)) {
    __m128i vx = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input)));
    vx = _mm_add_epi32(vx, vminus_zero_point);
    input += 4;
    __m128 vy = _mm_cvtepi32_ps(vx);
    vy = _mm_mul_ps(vy, _mm256_castps256_ps128(vscale));
    _mm_storeu_ps(output, vy);
    output += 4;
  }
  // Remainder of 1-3 elements: the 4-byte input load may read past the end of
  // the buffer (permitted by XNN_OOB_READS); stores are split so that exactly
  // `batch` floats are written.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(int8_t));
    assert(batch <= 3 * sizeof(int8_t));
    __m128i vx = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input)));
    vx = _mm_add_epi32(vx, vminus_zero_point);
    __m128 vy = _mm_cvtepi32_ps(vx);
    vy = _mm_mul_ps(vy, _mm256_castps256_ps128(vscale));
    if (batch & (2 * sizeof(int8_t))) {
      _mm_storel_pi((__m64*) output, vy);
      vy = _mm_movehl_ps(vy, vy);
      output += 2;
    }
    if (batch & (1 * sizeof(int8_t))) {
      _mm_store_ss(output, vy);
    }
  }
}
| 3,587
| 36.375
| 98
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-f32-vcvt/gen/qs8-f32-vcvt-avx-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/avx.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>
// Converts QS8 (quantized int8) elements to FP32: y = (x + minus_zero_point) * scale.
// AVX variant processing 32 elements per main-loop iteration.
void xnn_qs8_f32_vcvt_ukernel__avx_x32(
    size_t batch,
    const int8_t* input,
    float* output,
    const union xnn_qs8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(int8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Per-op constants: negated zero point (replicated per lane) and dequantization scale.
  const __m128i vminus_zero_point = _mm_load_si128((const __m128i*) params->avx.minus_zero_point);
  const __m256 vscale = _mm256_load_ps(params->avx.scale);
  // Main loop: 32 elements/iteration, loaded as eight 4-byte groups sign-extended to int32.
  for (; batch >= 32 * sizeof(int8_t); batch -= 32 * sizeof(int8_t)) {
    __m128i vx0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input)));
    __m128i vx4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 4)));
    __m128i vx89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 8)));
    __m128i vxCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 12)));
    __m128i vxGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 16)));
    __m128i vxKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 20)));
    __m128i vxOPQR = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 24)));
    __m128i vxSTUV = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 28)));
    input += 32;
    vx0123 = _mm_add_epi32(vx0123, vminus_zero_point);
    vx4567 = _mm_add_epi32(vx4567, vminus_zero_point);
    vx89AB = _mm_add_epi32(vx89AB, vminus_zero_point);
    vxCDEF = _mm_add_epi32(vxCDEF, vminus_zero_point);
    vxGHIJ = _mm_add_epi32(vxGHIJ, vminus_zero_point);
    vxKLMN = _mm_add_epi32(vxKLMN, vminus_zero_point);
    vxOPQR = _mm_add_epi32(vxOPQR, vminus_zero_point);
    vxSTUV = _mm_add_epi32(vxSTUV, vminus_zero_point);
    // Pair 128-bit halves into 256-bit vectors before the int32->float conversion.
    const __m256i vx01234567 = _mm256_insertf128_si256(_mm256_castsi128_si256(vx0123), vx4567, 1);
    const __m256i vx89ABCDEF = _mm256_insertf128_si256(_mm256_castsi128_si256(vx89AB), vxCDEF, 1);
    const __m256i vxGHIJKLMN = _mm256_insertf128_si256(_mm256_castsi128_si256(vxGHIJ), vxKLMN, 1);
    const __m256i vxOPQRSTUV = _mm256_insertf128_si256(_mm256_castsi128_si256(vxOPQR), vxSTUV, 1);
    __m256 vy01234567 = _mm256_cvtepi32_ps(vx01234567);
    __m256 vy89ABCDEF = _mm256_cvtepi32_ps(vx89ABCDEF);
    __m256 vyGHIJKLMN = _mm256_cvtepi32_ps(vxGHIJKLMN);
    __m256 vyOPQRSTUV = _mm256_cvtepi32_ps(vxOPQRSTUV);
    vy01234567 = _mm256_mul_ps(vy01234567, vscale);
    vy89ABCDEF = _mm256_mul_ps(vy89ABCDEF, vscale);
    vyGHIJKLMN = _mm256_mul_ps(vyGHIJKLMN, vscale);
    vyOPQRSTUV = _mm256_mul_ps(vyOPQRSTUV, vscale);
    _mm256_storeu_ps(output, vy01234567);
    _mm256_storeu_ps(output + 8, vy89ABCDEF);
    _mm256_storeu_ps(output + 16, vyGHIJKLMN);
    _mm256_storeu_ps(output + 24, vyOPQRSTUV);
    output += 32;
  }
  // Secondary loop: 4 elements/iteration.
  for (; batch >= 4 * sizeof(int8_t); batch -= 4 * sizeof(int8_t)) {
    __m128i vx = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input)));
    vx = _mm_add_epi32(vx, vminus_zero_point);
    input += 4;
    __m128 vy = _mm_cvtepi32_ps(vx);
    vy = _mm_mul_ps(vy, _mm256_castps256_ps128(vscale));
    _mm_storeu_ps(output, vy);
    output += 4;
  }
  // Remainder of 1-3 elements: the 4-byte input load may read past the end of
  // the buffer (permitted by XNN_OOB_READS); stores are split so that exactly
  // `batch` floats are written.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(int8_t));
    assert(batch <= 3 * sizeof(int8_t));
    __m128i vx = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input)));
    vx = _mm_add_epi32(vx, vminus_zero_point);
    __m128 vy = _mm_cvtepi32_ps(vx);
    vy = _mm_mul_ps(vy, _mm256_castps256_ps128(vscale));
    if (batch & (2 * sizeof(int8_t))) {
      _mm_storel_pi((__m64*) output, vy);
      vy = _mm_movehl_ps(vy, vy);
      output += 2;
    }
    if (batch & (1 * sizeof(int8_t))) {
      _mm_store_ss(output, vy);
    }
  }
}
| 4,145
| 38.865385
| 98
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-f32-vcvt/gen/qs8-f32-vcvt-avx-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/avx.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>
// Converts QS8 (quantized int8) elements to FP32: y = (x + minus_zero_point) * scale.
// AVX variant processing 8 elements per main-loop iteration.
void xnn_qs8_f32_vcvt_ukernel__avx_x8(
    size_t batch,
    const int8_t* input,
    float* output,
    const union xnn_qs8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(int8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Per-op constants: negated zero point (replicated per lane) and dequantization scale.
  const __m128i vminus_zero_point = _mm_load_si128((const __m128i*) params->avx.minus_zero_point);
  const __m256 vscale = _mm256_load_ps(params->avx.scale);
  // Main loop: 8 elements/iteration, loaded as two 4-byte groups sign-extended to int32.
  for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
    __m128i vx0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input)));
    __m128i vx4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 4)));
    input += 8;
    vx0123 = _mm_add_epi32(vx0123, vminus_zero_point);
    vx4567 = _mm_add_epi32(vx4567, vminus_zero_point);
    // Pair 128-bit halves into a 256-bit vector before the int32->float conversion.
    const __m256i vx01234567 = _mm256_insertf128_si256(_mm256_castsi128_si256(vx0123), vx4567, 1);
    __m256 vy01234567 = _mm256_cvtepi32_ps(vx01234567);
    vy01234567 = _mm256_mul_ps(vy01234567, vscale);
    _mm256_storeu_ps(output, vy01234567);
    output += 8;
  }
  // Secondary loop: 4 elements/iteration.
  for (; batch >= 4 * sizeof(int8_t); batch -= 4 * sizeof(int8_t)) {
    __m128i vx = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input)));
    vx = _mm_add_epi32(vx, vminus_zero_point);
    input += 4;
    __m128 vy = _mm_cvtepi32_ps(vx);
    vy = _mm_mul_ps(vy, _mm256_castps256_ps128(vscale));
    _mm_storeu_ps(output, vy);
    output += 4;
  }
  // Remainder of 1-3 elements: the 4-byte input load may read past the end of
  // the buffer (permitted by XNN_OOB_READS); stores are split so that exactly
  // `batch` floats are written.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(int8_t));
    assert(batch <= 3 * sizeof(int8_t));
    __m128i vx = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input)));
    vx = _mm_add_epi32(vx, vminus_zero_point);
    __m128 vy = _mm_cvtepi32_ps(vx);
    vy = _mm_mul_ps(vy, _mm256_castps256_ps128(vscale));
    if (batch & (2 * sizeof(int8_t))) {
      _mm_storel_pi((__m64*) output, vy);
      vy = _mm_movehl_ps(vy, vy);
      output += 2;
    }
    if (batch & (1 * sizeof(int8_t))) {
      _mm_store_ss(output, vy);
    }
  }
}
| 2,468
| 29.8625
| 98
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-f32-vcvt/gen/qs8-f32-vcvt-avx2-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/avx2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
// Converts QS8 (quantized int8) elements to FP32: y = (x + minus_zero_point) * scale.
// AVX2 variant processing 16 elements per main-loop iteration.
void xnn_qs8_f32_vcvt_ukernel__avx2_x16(
    size_t batch,
    const int8_t* input,
    float* output,
    const union xnn_qs8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(int8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Per-op constants: negated zero point (replicated per lane) and dequantization scale.
  const __m256i vminus_zero_point = _mm256_load_si256((const __m256i*) params->avx.minus_zero_point);
  const __m256 vscale = _mm256_load_ps(params->avx.scale);
  // Main loop: 16 elements/iteration, each 8-byte group sign-extended straight to 8x int32.
  for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
    __m256i vx01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input));
    __m256i vx89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (input + 8)));
    input += 16;
    vx01234567 = _mm256_add_epi32(vx01234567, vminus_zero_point);
    vx89ABCDEF = _mm256_add_epi32(vx89ABCDEF, vminus_zero_point);
    __m256 vy01234567 = _mm256_cvtepi32_ps(vx01234567);
    __m256 vy89ABCDEF = _mm256_cvtepi32_ps(vx89ABCDEF);
    vy01234567 = _mm256_mul_ps(vy01234567, vscale);
    vy89ABCDEF = _mm256_mul_ps(vy89ABCDEF, vscale);
    _mm256_storeu_ps(output, vy01234567);
    _mm256_storeu_ps(output + 8, vy89ABCDEF);
    output += 16;
  }
  // Secondary loop: 8 elements/iteration.
  for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
    __m256i vx = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input));
    vx = _mm256_add_epi32(vx, vminus_zero_point);
    input += 8;
    __m256 vy = _mm256_cvtepi32_ps(vx);
    vy = _mm256_mul_ps(vy, vscale);
    _mm256_storeu_ps(output, vy);
    output += 8;
  }
  // Remainder of 1-7 elements: the 8-byte input load may read past the end of
  // the buffer (permitted by XNN_OOB_READS); stores are split so that exactly
  // `batch` floats are written.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(int8_t));
    assert(batch <= 7 * sizeof(int8_t));
    __m256i vx = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input));
    vx = _mm256_add_epi32(vx, vminus_zero_point);
    __m256 vy = _mm256_cvtepi32_ps(vx);
    vy = _mm256_mul_ps(vy, vscale);
    __m128 vy_lo = _mm256_castps256_ps128(vy);
    if (batch & (4 * sizeof(int8_t))) {
      _mm_storeu_ps(output, vy_lo);
      vy_lo = _mm256_extractf128_ps(vy, 1);
      output += 4;
    }
    if (batch & (2 * sizeof(int8_t))) {
      _mm_storel_pi((__m64*) output, vy_lo);
      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
      output += 2;
    }
    if (batch & (1 * sizeof(int8_t))) {
      _mm_store_ss(output, vy_lo);
    }
  }
}
| 2,722
| 30.298851
| 101
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-f32-vcvt/gen/qs8-f32-vcvt-avx2-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/avx2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
// Converts QS8 (quantized int8) elements to FP32: y = (x + minus_zero_point) * scale.
// AVX2 variant processing 24 elements per main-loop iteration.
void xnn_qs8_f32_vcvt_ukernel__avx2_x24(
    size_t batch,
    const int8_t* input,
    float* output,
    const union xnn_qs8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(int8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Per-op constants: negated zero point (replicated per lane) and dequantization scale.
  const __m256i vminus_zero_point = _mm256_load_si256((const __m256i*) params->avx.minus_zero_point);
  const __m256 vscale = _mm256_load_ps(params->avx.scale);
  // Main loop: 24 elements/iteration, each 8-byte group sign-extended straight to 8x int32.
  for (; batch >= 24 * sizeof(int8_t); batch -= 24 * sizeof(int8_t)) {
    __m256i vx01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input));
    __m256i vx89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (input + 8)));
    __m256i vxGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (input + 16)));
    input += 24;
    vx01234567 = _mm256_add_epi32(vx01234567, vminus_zero_point);
    vx89ABCDEF = _mm256_add_epi32(vx89ABCDEF, vminus_zero_point);
    vxGHIJKLMN = _mm256_add_epi32(vxGHIJKLMN, vminus_zero_point);
    __m256 vy01234567 = _mm256_cvtepi32_ps(vx01234567);
    __m256 vy89ABCDEF = _mm256_cvtepi32_ps(vx89ABCDEF);
    __m256 vyGHIJKLMN = _mm256_cvtepi32_ps(vxGHIJKLMN);
    vy01234567 = _mm256_mul_ps(vy01234567, vscale);
    vy89ABCDEF = _mm256_mul_ps(vy89ABCDEF, vscale);
    vyGHIJKLMN = _mm256_mul_ps(vyGHIJKLMN, vscale);
    _mm256_storeu_ps(output, vy01234567);
    _mm256_storeu_ps(output + 8, vy89ABCDEF);
    _mm256_storeu_ps(output + 16, vyGHIJKLMN);
    output += 24;
  }
  // Secondary loop: 8 elements/iteration.
  for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
    __m256i vx = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input));
    vx = _mm256_add_epi32(vx, vminus_zero_point);
    input += 8;
    __m256 vy = _mm256_cvtepi32_ps(vx);
    vy = _mm256_mul_ps(vy, vscale);
    _mm256_storeu_ps(output, vy);
    output += 8;
  }
  // Remainder of 1-7 elements: the 8-byte input load may read past the end of
  // the buffer (permitted by XNN_OOB_READS); stores are split so that exactly
  // `batch` floats are written.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(int8_t));
    assert(batch <= 7 * sizeof(int8_t));
    __m256i vx = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input));
    vx = _mm256_add_epi32(vx, vminus_zero_point);
    __m256 vy = _mm256_cvtepi32_ps(vx);
    vy = _mm256_mul_ps(vy, vscale);
    __m128 vy_lo = _mm256_castps256_ps128(vy);
    if (batch & (4 * sizeof(int8_t))) {
      _mm_storeu_ps(output, vy_lo);
      vy_lo = _mm256_extractf128_ps(vy, 1);
      output += 4;
    }
    if (batch & (2 * sizeof(int8_t))) {
      _mm_storel_pi((__m64*) output, vy_lo);
      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
      output += 2;
    }
    if (batch & (1 * sizeof(int8_t))) {
      _mm_store_ss(output, vy_lo);
    }
  }
}
| 3,038
| 32.032609
| 101
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-f32-vcvt/gen/qs8-f32-vcvt-avx2-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/avx2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
// Converts QS8 (quantized int8) elements to FP32: y = (x + minus_zero_point) * scale.
// AVX2 variant processing 32 elements per main-loop iteration.
void xnn_qs8_f32_vcvt_ukernel__avx2_x32(
    size_t batch,
    const int8_t* input,
    float* output,
    const union xnn_qs8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(int8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Per-op constants: negated zero point (replicated per lane) and dequantization scale.
  const __m256i vminus_zero_point = _mm256_load_si256((const __m256i*) params->avx.minus_zero_point);
  const __m256 vscale = _mm256_load_ps(params->avx.scale);
  // Main loop: 32 elements/iteration, each 8-byte group sign-extended straight to 8x int32.
  for (; batch >= 32 * sizeof(int8_t); batch -= 32 * sizeof(int8_t)) {
    __m256i vx01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input));
    __m256i vx89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (input + 8)));
    __m256i vxGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (input + 16)));
    __m256i vxOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (input + 24)));
    input += 32;
    vx01234567 = _mm256_add_epi32(vx01234567, vminus_zero_point);
    vx89ABCDEF = _mm256_add_epi32(vx89ABCDEF, vminus_zero_point);
    vxGHIJKLMN = _mm256_add_epi32(vxGHIJKLMN, vminus_zero_point);
    vxOPQRSTUV = _mm256_add_epi32(vxOPQRSTUV, vminus_zero_point);
    __m256 vy01234567 = _mm256_cvtepi32_ps(vx01234567);
    __m256 vy89ABCDEF = _mm256_cvtepi32_ps(vx89ABCDEF);
    __m256 vyGHIJKLMN = _mm256_cvtepi32_ps(vxGHIJKLMN);
    __m256 vyOPQRSTUV = _mm256_cvtepi32_ps(vxOPQRSTUV);
    vy01234567 = _mm256_mul_ps(vy01234567, vscale);
    vy89ABCDEF = _mm256_mul_ps(vy89ABCDEF, vscale);
    vyGHIJKLMN = _mm256_mul_ps(vyGHIJKLMN, vscale);
    vyOPQRSTUV = _mm256_mul_ps(vyOPQRSTUV, vscale);
    _mm256_storeu_ps(output, vy01234567);
    _mm256_storeu_ps(output + 8, vy89ABCDEF);
    _mm256_storeu_ps(output + 16, vyGHIJKLMN);
    _mm256_storeu_ps(output + 24, vyOPQRSTUV);
    output += 32;
  }
  // Secondary loop: 8 elements/iteration.
  for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
    __m256i vx = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input));
    vx = _mm256_add_epi32(vx, vminus_zero_point);
    input += 8;
    __m256 vy = _mm256_cvtepi32_ps(vx);
    vy = _mm256_mul_ps(vy, vscale);
    _mm256_storeu_ps(output, vy);
    output += 8;
  }
  // Remainder of 1-7 elements: the 8-byte input load may read past the end of
  // the buffer (permitted by XNN_OOB_READS); stores are split so that exactly
  // `batch` floats are written.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(int8_t));
    assert(batch <= 7 * sizeof(int8_t));
    __m256i vx = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input));
    vx = _mm256_add_epi32(vx, vminus_zero_point);
    __m256 vy = _mm256_cvtepi32_ps(vx);
    vy = _mm256_mul_ps(vy, vscale);
    __m128 vy_lo = _mm256_castps256_ps128(vy);
    if (batch & (4 * sizeof(int8_t))) {
      _mm_storeu_ps(output, vy_lo);
      vy_lo = _mm256_extractf128_ps(vy, 1);
      output += 4;
    }
    if (batch & (2 * sizeof(int8_t))) {
      _mm_storel_pi((__m64*) output, vy_lo);
      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
      output += 2;
    }
    if (batch & (1 * sizeof(int8_t))) {
      _mm_store_ss(output, vy_lo);
    }
  }
}
| 3,354
| 33.587629
| 101
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-f32-vcvt/gen/qs8-f32-vcvt-avx2-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/avx2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
// Converts QS8 (quantized int8) elements to FP32: y = (x + minus_zero_point) * scale.
// AVX2 variant processing 8 elements per main-loop iteration.
//
// Fix: the original contained a second loop with the identical condition
// `batch >= 8 * sizeof(int8_t)` immediately after the main loop; since the main
// loop only exits when batch < 8, that loop was unreachable dead code and has
// been removed. Behavior is unchanged.
void xnn_qs8_f32_vcvt_ukernel__avx2_x8(
    size_t batch,
    const int8_t* input,
    float* output,
    const union xnn_qs8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(int8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Per-op constants: negated zero point (replicated per lane) and dequantization scale.
  const __m256i vminus_zero_point = _mm256_load_si256((const __m256i*) params->avx.minus_zero_point);
  const __m256 vscale = _mm256_load_ps(params->avx.scale);
  // Main loop: 8 elements/iteration, sign-extended straight to 8x int32.
  for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
    __m256i vx01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input));
    input += 8;
    vx01234567 = _mm256_add_epi32(vx01234567, vminus_zero_point);
    __m256 vy01234567 = _mm256_cvtepi32_ps(vx01234567);
    vy01234567 = _mm256_mul_ps(vy01234567, vscale);
    _mm256_storeu_ps(output, vy01234567);
    output += 8;
  }
  // Remainder of 1-7 elements: the 8-byte input load may read past the end of
  // the buffer (permitted by XNN_OOB_READS); stores are split so that exactly
  // `batch` floats are written.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(int8_t));
    assert(batch <= 7 * sizeof(int8_t));
    __m256i vx = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input));
    vx = _mm256_add_epi32(vx, vminus_zero_point);
    __m256 vy = _mm256_cvtepi32_ps(vx);
    vy = _mm256_mul_ps(vy, vscale);
    __m128 vy_lo = _mm256_castps256_ps128(vy);
    if (batch & (4 * sizeof(int8_t))) {
      _mm_storeu_ps(output, vy_lo);
      vy_lo = _mm256_extractf128_ps(vy, 1);
      output += 4;
    }
    if (batch & (2 * sizeof(int8_t))) {
      _mm_storel_pi((__m64*) output, vy_lo);
      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
      output += 2;
    }
    if (batch & (1 * sizeof(int8_t))) {
      _mm_store_ss(output, vy_lo);
    }
  }
}
| 2,403
| 28.317073
| 101
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-f32-vcvt/gen/qs8-f32-vcvt-avx512skx-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/avx512skx.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
// Converts QS8 (quantized int8) elements to FP32: y = (x + minus_zero_point) * scale.
// AVX-512 SKX variant processing 16 elements per main-loop iteration.
void xnn_qs8_f32_vcvt_ukernel__avx512skx_x16(
    size_t batch,
    const int8_t* input,
    float* output,
    const union xnn_qs8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(int8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Per-op constants: negated zero point (replicated per lane) and dequantization scale.
  const __m512i vminus_zero_point = _mm512_load_si512(params->avx512.minus_zero_point);
  const __m512 vscale = _mm512_load_ps(params->avx512.scale);
  // Main loop: 16 elements/iteration, sign-extended straight to 16x int32.
  for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
    __m512i vx = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) input));
    vx = _mm512_add_epi32(vx, vminus_zero_point);
    input += 16;
    __m512 vy = _mm512_cvtepi32_ps(vx);
    vy = _mm512_mul_ps(vy, vscale);
    _mm512_storeu_ps(output, vy);
    output += 16;
  }
  // Remainder of 1-15 elements, handled with a masked load/store so that only
  // `batch` input bytes are read and `batch` floats are written.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(int8_t));
    assert(batch <= 15 * sizeof(int8_t))
;
    // Prepare mask for valid elements (depends on batch).
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
    __m512i vx = _mm512_cvtepi8_epi32(_mm_maskz_loadu_epi8(vmask, input));
    vx = _mm512_add_epi32(vx, vminus_zero_point);
    __m512 vy = _mm512_cvtepi32_ps(vx);
    vy = _mm512_mul_ps(vy, vscale);
    _mm512_mask_storeu_ps(output, vmask, vy);
  }
}
| 1,775
| 29.101695
| 105
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-f32-vcvt/gen/qs8-f32-vcvt-avx512skx-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/avx512skx.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
// Converts QS8 (quantized int8) elements to FP32: y = (x + minus_zero_point) * scale.
// AVX-512 SKX variant processing 32 elements per main-loop iteration.
void xnn_qs8_f32_vcvt_ukernel__avx512skx_x32(
    size_t batch,
    const int8_t* input,
    float* output,
    const union xnn_qs8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(int8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Per-op constants: negated zero point (replicated per lane) and dequantization scale.
  const __m512i vminus_zero_point = _mm512_load_si512(params->avx512.minus_zero_point);
  const __m512 vscale = _mm512_load_ps(params->avx512.scale);
  // Main loop: 32 elements/iteration as two 16-byte groups sign-extended to 16x int32 each.
  for (; batch >= 32 * sizeof(int8_t); batch -= 32 * sizeof(int8_t)) {
    __m512i vx0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) input));
    __m512i vxGHIJKLMNOPQRSTUV = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) (input + 16)));
    input += 32;
    vx0123456789ABCDEF = _mm512_add_epi32(vx0123456789ABCDEF, vminus_zero_point);
    vxGHIJKLMNOPQRSTUV = _mm512_add_epi32(vxGHIJKLMNOPQRSTUV, vminus_zero_point);
    __m512 vy0123456789ABCDEF = _mm512_cvtepi32_ps(vx0123456789ABCDEF);
    __m512 vyGHIJKLMNOPQRSTUV = _mm512_cvtepi32_ps(vxGHIJKLMNOPQRSTUV);
    vy0123456789ABCDEF = _mm512_mul_ps(vy0123456789ABCDEF, vscale);
    vyGHIJKLMNOPQRSTUV = _mm512_mul_ps(vyGHIJKLMNOPQRSTUV, vscale);
    _mm512_storeu_ps(output, vy0123456789ABCDEF);
    _mm512_storeu_ps(output + 16, vyGHIJKLMNOPQRSTUV);
    output += 32;
  }
  // Secondary loop: 16 elements/iteration.
  for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
    __m512i vx = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) input));
    vx = _mm512_add_epi32(vx, vminus_zero_point);
    input += 16;
    __m512 vy = _mm512_cvtepi32_ps(vx);
    vy = _mm512_mul_ps(vy, vscale);
    _mm512_storeu_ps(output, vy);
    output += 16;
  }
  // Remainder of 1-15 elements, handled with a masked load/store so that only
  // `batch` input bytes are read and `batch` floats are written.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(int8_t));
    assert(batch <= 15 * sizeof(int8_t));
    // Prepare mask for valid elements (depends on batch).
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
    __m512i vx = _mm512_cvtepi8_epi32(_mm_maskz_loadu_epi8(vmask, input));
    vx = _mm512_add_epi32(vx, vminus_zero_point);
    __m512 vy = _mm512_cvtepi32_ps(vx);
    vy = _mm512_mul_ps(vy, vscale);
    _mm512_mask_storeu_ps(output, vmask, vy);
  }
}
| 2,637
| 33.25974
| 105
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-f32-vcvt/gen/qs8-f32-vcvt-avx512skx-x48.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/avx512skx.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
// Converts QS8 (quantized int8) elements to FP32: y = (x + minus_zero_point) * scale.
// AVX-512 SKX variant processing 48 elements per main-loop iteration.
void xnn_qs8_f32_vcvt_ukernel__avx512skx_x48(
    size_t batch,
    const int8_t* input,
    float* output,
    const union xnn_qs8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(int8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Per-op constants: negated zero point (replicated per lane) and dequantization scale.
  const __m512i vminus_zero_point = _mm512_load_si512(params->avx512.minus_zero_point);
  const __m512 vscale = _mm512_load_ps(params->avx512.scale);
  // Main loop: 48 elements/iteration as three 16-byte groups sign-extended to 16x int32 each.
  for (; batch >= 48 * sizeof(int8_t); batch -= 48 * sizeof(int8_t)) {
    __m512i vx0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) input));
    __m512i vxGHIJKLMNOPQRSTUV = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) (input + 16)));
    __m512i vxWXYZ = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) (input + 32)));
    input += 48;
    vx0123456789ABCDEF = _mm512_add_epi32(vx0123456789ABCDEF, vminus_zero_point);
    vxGHIJKLMNOPQRSTUV = _mm512_add_epi32(vxGHIJKLMNOPQRSTUV, vminus_zero_point);
    vxWXYZ = _mm512_add_epi32(vxWXYZ, vminus_zero_point);
    __m512 vy0123456789ABCDEF = _mm512_cvtepi32_ps(vx0123456789ABCDEF);
    __m512 vyGHIJKLMNOPQRSTUV = _mm512_cvtepi32_ps(vxGHIJKLMNOPQRSTUV);
    __m512 vyWXYZ = _mm512_cvtepi32_ps(vxWXYZ);
    vy0123456789ABCDEF = _mm512_mul_ps(vy0123456789ABCDEF, vscale);
    vyGHIJKLMNOPQRSTUV = _mm512_mul_ps(vyGHIJKLMNOPQRSTUV, vscale);
    vyWXYZ = _mm512_mul_ps(vyWXYZ, vscale);
    _mm512_storeu_ps(output, vy0123456789ABCDEF);
    _mm512_storeu_ps(output + 16, vyGHIJKLMNOPQRSTUV);
    _mm512_storeu_ps(output + 32, vyWXYZ);
    output += 48;
  }
  // Secondary loop: 16 elements/iteration.
  for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
    __m512i vx = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) input));
    vx = _mm512_add_epi32(vx, vminus_zero_point);
    input += 16;
    __m512 vy = _mm512_cvtepi32_ps(vx);
    vy = _mm512_mul_ps(vy, vscale);
    _mm512_storeu_ps(output, vy);
    output += 16;
  }
  // Remainder of 1-15 elements, handled with a masked load/store so that only
  // `batch` input bytes are read and `batch` floats are written.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(int8_t));
    assert(batch <= 15 * sizeof(int8_t));
    // Prepare mask for valid elements (depends on batch).
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
    __m512i vx = _mm512_cvtepi8_epi32(_mm_maskz_loadu_epi8(vmask, input));
    vx = _mm512_add_epi32(vx, vminus_zero_point);
    __m512 vy = _mm512_cvtepi32_ps(vx);
    vy = _mm512_mul_ps(vy, vscale);
    _mm512_mask_storeu_ps(output, vmask, vy);
  }
}
| 2,921
| 34.634146
| 105
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-f32-vcvt/gen/qs8-f32-vcvt-avx512skx-x64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/avx512skx.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
// Converts QS8 (quantized int8) elements to FP32: y = (x + minus_zero_point) * scale.
// AVX-512 SKX variant processing 64 elements per main-loop iteration.
void xnn_qs8_f32_vcvt_ukernel__avx512skx_x64(
    size_t batch,
    const int8_t* input,
    float* output,
    const union xnn_qs8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(int8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Per-op constants: negated zero point (replicated per lane) and dequantization scale.
  const __m512i vminus_zero_point = _mm512_load_si512(params->avx512.minus_zero_point);
  const __m512 vscale = _mm512_load_ps(params->avx512.scale);
  // Main loop: 64 elements/iteration as four 16-byte groups sign-extended to 16x int32 each.
  for (; batch >= 64 * sizeof(int8_t); batch -= 64 * sizeof(int8_t)) {
    __m512i vx0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) input));
    __m512i vxGHIJKLMNOPQRSTUV = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) (input + 16)));
    __m512i vxWXYZ = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) (input + 32)));
    __m512i vx = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) (input + 48)));
    input += 64;
    vx0123456789ABCDEF = _mm512_add_epi32(vx0123456789ABCDEF, vminus_zero_point);
    vxGHIJKLMNOPQRSTUV = _mm512_add_epi32(vxGHIJKLMNOPQRSTUV, vminus_zero_point);
    vxWXYZ = _mm512_add_epi32(vxWXYZ, vminus_zero_point);
    vx = _mm512_add_epi32(vx, vminus_zero_point);
    __m512 vy0123456789ABCDEF = _mm512_cvtepi32_ps(vx0123456789ABCDEF);
    __m512 vyGHIJKLMNOPQRSTUV = _mm512_cvtepi32_ps(vxGHIJKLMNOPQRSTUV);
    __m512 vyWXYZ = _mm512_cvtepi32_ps(vxWXYZ);
    __m512 vy = _mm512_cvtepi32_ps(vx);
    vy0123456789ABCDEF = _mm512_mul_ps(vy0123456789ABCDEF, vscale);
    vyGHIJKLMNOPQRSTUV = _mm512_mul_ps(vyGHIJKLMNOPQRSTUV, vscale);
    vyWXYZ = _mm512_mul_ps(vyWXYZ, vscale);
    vy = _mm512_mul_ps(vy, vscale);
    _mm512_storeu_ps(output, vy0123456789ABCDEF);
    _mm512_storeu_ps(output + 16, vyGHIJKLMNOPQRSTUV);
    _mm512_storeu_ps(output + 32, vyWXYZ);
    _mm512_storeu_ps(output + 48, vy);
    output += 64;
  }
  // Secondary loop: 16 elements/iteration.
  for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
    __m512i vx = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) input));
    vx = _mm512_add_epi32(vx, vminus_zero_point);
    input += 16;
    __m512 vy = _mm512_cvtepi32_ps(vx);
    vy = _mm512_mul_ps(vy, vscale);
    _mm512_storeu_ps(output, vy);
    output += 16;
  }
  // Remainder of 1-15 elements, handled with a masked load/store so that only
  // `batch` input bytes are read and `batch` floats are written.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(int8_t));
    assert(batch <= 15 * sizeof(int8_t));
    // Prepare mask for valid elements (depends on batch).
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
    __m512i vx = _mm512_cvtepi8_epi32(_mm_maskz_loadu_epi8(vmask, input));
    vx = _mm512_add_epi32(vx, vminus_zero_point);
    __m512 vy = _mm512_cvtepi32_ps(vx);
    vy = _mm512_mul_ps(vy, vscale);
    _mm512_mask_storeu_ps(output, vmask, vy);
  }
}
| 3,173
| 35.482759
| 105
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-f32-vcvt/gen/qs8-f32-vcvt-neon-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
// Converts QS8 (quantized int8) elements to FP32: y = (x + minus_zero_point) * scale.
// NEON variant processing 16 elements per main-loop iteration.
void xnn_qs8_f32_vcvt_ukernel__neon_x16(
    size_t batch,
    const int8_t* input,
    float* output,
    const union xnn_qs8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(int8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Negated zero point broadcast across all 8 int16 lanes: the params field holds
  // it duplicated in a 32-bit word, which vld1q_dup_u32 replicates four times.
  const int16x8_t vminus_zero_point = vreinterpretq_s16_u32(vld1q_dup_u32((const void*) params->neon.minus_zero_point));
  const float32x4_t vscale = vld1q_dup_f32(&params->neon.scale);
  // Main loop: 16 elements/iteration; vaddw_s8 widens int8 to int16 while adding
  // the negated zero point, then each half is widened again to int32.
  for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
    const int8x8_t vx01234567 = vld1_s8(input); input += 8;
    const int8x8_t vx89ABCDEF = vld1_s8(input); input += 8;
    const int16x8_t vhx01234567 = vaddw_s8(vminus_zero_point, vx01234567);
    const int16x8_t vhx89ABCDEF = vaddw_s8(vminus_zero_point, vx89ABCDEF);
    const int32x4_t vwx0123 = vmovl_s16(vget_low_s16(vhx01234567));
    const int32x4_t vwx4567 = vmovl_s16(vget_high_s16(vhx01234567));
    const int32x4_t vwx89AB = vmovl_s16(vget_low_s16(vhx89ABCDEF));
    const int32x4_t vwxCDEF = vmovl_s16(vget_high_s16(vhx89ABCDEF));
    float32x4_t vy0123 = vcvtq_f32_s32(vwx0123);
    float32x4_t vy4567 = vcvtq_f32_s32(vwx4567);
    float32x4_t vy89AB = vcvtq_f32_s32(vwx89AB);
    float32x4_t vyCDEF = vcvtq_f32_s32(vwxCDEF);
    vy0123 = vmulq_f32(vy0123, vscale);
    vy4567 = vmulq_f32(vy4567, vscale);
    vy89AB = vmulq_f32(vy89AB, vscale);
    vyCDEF = vmulq_f32(vyCDEF, vscale);
    vst1q_f32(output, vy0123); output += 4;
    vst1q_f32(output, vy4567); output += 4;
    vst1q_f32(output, vy89AB); output += 4;
    vst1q_f32(output, vyCDEF); output += 4;
  }
  // Secondary loop: 8 elements/iteration.
  for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
    const int8x8_t vx = vld1_s8(input); input += 8;
    const int16x8_t vhx = vaddw_s8(vminus_zero_point, vx);
    const int32x4_t vwx_lo = vmovl_s16(vget_low_s16(vhx));
    const int32x4_t vwx_hi = vmovl_s16(vget_high_s16(vhx));
    float32x4_t vy_lo = vcvtq_f32_s32(vwx_lo);
    float32x4_t vy_hi = vcvtq_f32_s32(vwx_hi);
    vy_lo = vmulq_f32(vy_lo, vscale);
    vy_hi = vmulq_f32(vy_hi, vscale);
    vst1q_f32(output, vy_lo); output += 4;
    vst1q_f32(output, vy_hi); output += 4;
  }
  // Remainder of 1-7 elements: the full 8-byte load may read past the end of
  // the input (permitted by XNN_OOB_READS); stores are split so that exactly
  // `batch` floats are written.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(int8_t));
    assert(batch <= 7 * sizeof(int8_t));
    const int8x8_t vx = vld1_s8(input);
    const int16x8_t vhx = vaddw_s8(vminus_zero_point, vx);
    const int32x4_t vwx_lo = vmovl_s16(vget_low_s16(vhx));
    const int32x4_t vwx_hi = vmovl_s16(vget_high_s16(vhx));
    float32x4_t vy = vcvtq_f32_s32(vwx_lo);
    vy = vmulq_f32(vy, vscale);
    if (batch & (4 * sizeof(int8_t))) {
      vst1q_f32(output, vy); output += 4;
      vy = vcvtq_f32_s32(vwx_hi);
      vy = vmulq_f32(vy, vscale);
    }
    float32x2_t vy_lo = vget_low_f32(vy);
    if (batch & (2 * sizeof(int8_t))) {
      vst1_f32(output, vy_lo); output += 2;
      vy_lo = vget_high_f32(vy);
    }
    if (batch & (1 * sizeof(int8_t))) {
      vst1_lane_f32(output, vy_lo, 0);
    }
  }
}
| 3,456
| 31.92381
| 120
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-f32-vcvt/gen/qs8-f32-vcvt-neon-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
// Converts QS8 (quantized signed 8-bit) elements to FP32 with NEON:
//   output[i] = (float) (input[i] - zero_point) * scale
// The main loop handles 24 elements per iteration; trailing loops handle a
// remaining group of 8 and then 1-7 elements. XNN_OOB_READS: the final
// partial 8-byte load may read past the end of `input`; extra lanes are
// never stored.
//
// batch  - number of elements (in bytes; must be non-zero and a multiple of
//          sizeof(int8_t))
// input  - quantized int8 input, `batch` elements
// output - float output, `batch` elements
// params - conversion parameters (NEON layout: minus_zero_point, scale)
void xnn_qs8_f32_vcvt_ukernel__neon_x24(
    size_t batch,
    const int8_t* input,
    float* output,
    const union xnn_qs8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(int8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // minus_zero_point is stored as one 32-bit word holding two identical
  // int16 halves; duplicate it across the register and view it as 8x int16.
  const int16x8_t vminus_zero_point = vreinterpretq_s16_u32(vld1q_dup_u32((const void*) params->neon.minus_zero_point));
  // Fixed: '&params' had been corrupted to '¶ms' (HTML-entity mangling).
  const float32x4_t vscale = vld1q_dup_f32(&params->neon.scale);

  // Main loop: 24 elements per iteration.
  for (; batch >= 24 * sizeof(int8_t); batch -= 24 * sizeof(int8_t)) {
    const int8x8_t vx01234567 = vld1_s8(input); input += 8;
    const int8x8_t vx89ABCDEF = vld1_s8(input); input += 8;
    const int8x8_t vxGHIJKLMN = vld1_s8(input); input += 8;

    // Widening add of the negated zero point: subtracts the zero point
    // while promoting int8 -> int16.
    const int16x8_t vhx01234567 = vaddw_s8(vminus_zero_point, vx01234567);
    const int16x8_t vhx89ABCDEF = vaddw_s8(vminus_zero_point, vx89ABCDEF);
    const int16x8_t vhxGHIJKLMN = vaddw_s8(vminus_zero_point, vxGHIJKLMN);

    // Promote int16 -> int32.
    const int32x4_t vwx0123 = vmovl_s16(vget_low_s16(vhx01234567));
    const int32x4_t vwx4567 = vmovl_s16(vget_high_s16(vhx01234567));
    const int32x4_t vwx89AB = vmovl_s16(vget_low_s16(vhx89ABCDEF));
    const int32x4_t vwxCDEF = vmovl_s16(vget_high_s16(vhx89ABCDEF));
    const int32x4_t vwxGHIJ = vmovl_s16(vget_low_s16(vhxGHIJKLMN));
    const int32x4_t vwxKLMN = vmovl_s16(vget_high_s16(vhxGHIJKLMN));

    // Convert to float and apply the quantization scale.
    float32x4_t vy0123 = vcvtq_f32_s32(vwx0123);
    float32x4_t vy4567 = vcvtq_f32_s32(vwx4567);
    float32x4_t vy89AB = vcvtq_f32_s32(vwx89AB);
    float32x4_t vyCDEF = vcvtq_f32_s32(vwxCDEF);
    float32x4_t vyGHIJ = vcvtq_f32_s32(vwxGHIJ);
    float32x4_t vyKLMN = vcvtq_f32_s32(vwxKLMN);
    vy0123 = vmulq_f32(vy0123, vscale);
    vy4567 = vmulq_f32(vy4567, vscale);
    vy89AB = vmulq_f32(vy89AB, vscale);
    vyCDEF = vmulq_f32(vyCDEF, vscale);
    vyGHIJ = vmulq_f32(vyGHIJ, vscale);
    vyKLMN = vmulq_f32(vyKLMN, vscale);

    vst1q_f32(output, vy0123); output += 4;
    vst1q_f32(output, vy4567); output += 4;
    vst1q_f32(output, vy89AB); output += 4;
    vst1q_f32(output, vyCDEF); output += 4;
    vst1q_f32(output, vyGHIJ); output += 4;
    vst1q_f32(output, vyKLMN); output += 4;
  }
  // Tail: remaining full groups of 8 elements.
  for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
    const int8x8_t vx = vld1_s8(input); input += 8;
    const int16x8_t vhx = vaddw_s8(vminus_zero_point, vx);
    const int32x4_t vwx_lo = vmovl_s16(vget_low_s16(vhx));
    const int32x4_t vwx_hi = vmovl_s16(vget_high_s16(vhx));
    float32x4_t vy_lo = vcvtq_f32_s32(vwx_lo);
    float32x4_t vy_hi = vcvtq_f32_s32(vwx_hi);
    vy_lo = vmulq_f32(vy_lo, vscale);
    vy_hi = vmulq_f32(vy_hi, vscale);
    vst1q_f32(output, vy_lo); output += 4;
    vst1q_f32(output, vy_hi); output += 4;
  }
  // Tail: 1-7 remaining elements. The 8-byte load may read out of bounds
  // (permitted by XNN_OOB_READS); only `batch` results are stored, selected
  // by the 4/2/1 bits of `batch`.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(int8_t));
    assert(batch <= 7 * sizeof(int8_t));
    const int8x8_t vx = vld1_s8(input);
    const int16x8_t vhx = vaddw_s8(vminus_zero_point, vx);
    const int32x4_t vwx_lo = vmovl_s16(vget_low_s16(vhx));
    const int32x4_t vwx_hi = vmovl_s16(vget_high_s16(vhx));
    float32x4_t vy = vcvtq_f32_s32(vwx_lo);
    vy = vmulq_f32(vy, vscale);
    if (batch & (4 * sizeof(int8_t))) {
      vst1q_f32(output, vy); output += 4;
      vy = vcvtq_f32_s32(vwx_hi);
      vy = vmulq_f32(vy, vscale);
    }
    float32x2_t vy_lo = vget_low_f32(vy);
    if (batch & (2 * sizeof(int8_t))) {
      vst1_f32(output, vy_lo); output += 2;
      vy_lo = vget_high_f32(vy);
    }
    if (batch & (1 * sizeof(int8_t))) {
      vst1_lane_f32(output, vy_lo, 0);
    }
  }
}
| 3,994
| 33.73913
| 120
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-f32-vcvt/gen/qs8-f32-vcvt-neon-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
// Converts QS8 (quantized signed 8-bit) elements to FP32 with NEON:
//   output[i] = (float) (input[i] - zero_point) * scale
// The main loop handles 32 elements per iteration; trailing loops handle a
// remaining group of 8 and then 1-7 elements. XNN_OOB_READS: the final
// partial 8-byte load may read past the end of `input`; extra lanes are
// never stored.
//
// batch  - number of elements (in bytes; must be non-zero and a multiple of
//          sizeof(int8_t))
// input  - quantized int8 input, `batch` elements
// output - float output, `batch` elements
// params - conversion parameters (NEON layout: minus_zero_point, scale)
void xnn_qs8_f32_vcvt_ukernel__neon_x32(
    size_t batch,
    const int8_t* input,
    float* output,
    const union xnn_qs8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(int8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // minus_zero_point is stored as one 32-bit word holding two identical
  // int16 halves; duplicate it across the register and view it as 8x int16.
  const int16x8_t vminus_zero_point = vreinterpretq_s16_u32(vld1q_dup_u32((const void*) params->neon.minus_zero_point));
  // Fixed: '&params' had been corrupted to '¶ms' (HTML-entity mangling).
  const float32x4_t vscale = vld1q_dup_f32(&params->neon.scale);

  // Main loop: 32 elements per iteration.
  for (; batch >= 32 * sizeof(int8_t); batch -= 32 * sizeof(int8_t)) {
    const int8x8_t vx01234567 = vld1_s8(input); input += 8;
    const int8x8_t vx89ABCDEF = vld1_s8(input); input += 8;
    const int8x8_t vxGHIJKLMN = vld1_s8(input); input += 8;
    const int8x8_t vxOPQRSTUV = vld1_s8(input); input += 8;

    // Widening add of the negated zero point: subtracts the zero point
    // while promoting int8 -> int16.
    const int16x8_t vhx01234567 = vaddw_s8(vminus_zero_point, vx01234567);
    const int16x8_t vhx89ABCDEF = vaddw_s8(vminus_zero_point, vx89ABCDEF);
    const int16x8_t vhxGHIJKLMN = vaddw_s8(vminus_zero_point, vxGHIJKLMN);
    const int16x8_t vhxOPQRSTUV = vaddw_s8(vminus_zero_point, vxOPQRSTUV);

    // Promote int16 -> int32.
    const int32x4_t vwx0123 = vmovl_s16(vget_low_s16(vhx01234567));
    const int32x4_t vwx4567 = vmovl_s16(vget_high_s16(vhx01234567));
    const int32x4_t vwx89AB = vmovl_s16(vget_low_s16(vhx89ABCDEF));
    const int32x4_t vwxCDEF = vmovl_s16(vget_high_s16(vhx89ABCDEF));
    const int32x4_t vwxGHIJ = vmovl_s16(vget_low_s16(vhxGHIJKLMN));
    const int32x4_t vwxKLMN = vmovl_s16(vget_high_s16(vhxGHIJKLMN));
    const int32x4_t vwxOPQR = vmovl_s16(vget_low_s16(vhxOPQRSTUV));
    const int32x4_t vwxSTUV = vmovl_s16(vget_high_s16(vhxOPQRSTUV));

    // Convert to float and apply the quantization scale.
    float32x4_t vy0123 = vcvtq_f32_s32(vwx0123);
    float32x4_t vy4567 = vcvtq_f32_s32(vwx4567);
    float32x4_t vy89AB = vcvtq_f32_s32(vwx89AB);
    float32x4_t vyCDEF = vcvtq_f32_s32(vwxCDEF);
    float32x4_t vyGHIJ = vcvtq_f32_s32(vwxGHIJ);
    float32x4_t vyKLMN = vcvtq_f32_s32(vwxKLMN);
    float32x4_t vyOPQR = vcvtq_f32_s32(vwxOPQR);
    float32x4_t vySTUV = vcvtq_f32_s32(vwxSTUV);
    vy0123 = vmulq_f32(vy0123, vscale);
    vy4567 = vmulq_f32(vy4567, vscale);
    vy89AB = vmulq_f32(vy89AB, vscale);
    vyCDEF = vmulq_f32(vyCDEF, vscale);
    vyGHIJ = vmulq_f32(vyGHIJ, vscale);
    vyKLMN = vmulq_f32(vyKLMN, vscale);
    vyOPQR = vmulq_f32(vyOPQR, vscale);
    vySTUV = vmulq_f32(vySTUV, vscale);

    vst1q_f32(output, vy0123); output += 4;
    vst1q_f32(output, vy4567); output += 4;
    vst1q_f32(output, vy89AB); output += 4;
    vst1q_f32(output, vyCDEF); output += 4;
    vst1q_f32(output, vyGHIJ); output += 4;
    vst1q_f32(output, vyKLMN); output += 4;
    vst1q_f32(output, vyOPQR); output += 4;
    vst1q_f32(output, vySTUV); output += 4;
  }
  // Tail: remaining full groups of 8 elements.
  for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
    const int8x8_t vx = vld1_s8(input); input += 8;
    const int16x8_t vhx = vaddw_s8(vminus_zero_point, vx);
    const int32x4_t vwx_lo = vmovl_s16(vget_low_s16(vhx));
    const int32x4_t vwx_hi = vmovl_s16(vget_high_s16(vhx));
    float32x4_t vy_lo = vcvtq_f32_s32(vwx_lo);
    float32x4_t vy_hi = vcvtq_f32_s32(vwx_hi);
    vy_lo = vmulq_f32(vy_lo, vscale);
    vy_hi = vmulq_f32(vy_hi, vscale);
    vst1q_f32(output, vy_lo); output += 4;
    vst1q_f32(output, vy_hi); output += 4;
  }
  // Tail: 1-7 remaining elements. The 8-byte load may read out of bounds
  // (permitted by XNN_OOB_READS); only `batch` results are stored, selected
  // by the 4/2/1 bits of `batch`.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(int8_t));
    assert(batch <= 7 * sizeof(int8_t));
    const int8x8_t vx = vld1_s8(input);
    const int16x8_t vhx = vaddw_s8(vminus_zero_point, vx);
    const int32x4_t vwx_lo = vmovl_s16(vget_low_s16(vhx));
    const int32x4_t vwx_hi = vmovl_s16(vget_high_s16(vhx));
    float32x4_t vy = vcvtq_f32_s32(vwx_lo);
    vy = vmulq_f32(vy, vscale);
    if (batch & (4 * sizeof(int8_t))) {
      vst1q_f32(output, vy); output += 4;
      vy = vcvtq_f32_s32(vwx_hi);
      vy = vmulq_f32(vy, vscale);
    }
    float32x2_t vy_lo = vget_low_f32(vy);
    if (batch & (2 * sizeof(int8_t))) {
      vst1_f32(output, vy_lo); output += 2;
      vy_lo = vget_high_f32(vy);
    }
    if (batch & (1 * sizeof(int8_t))) {
      vst1_lane_f32(output, vy_lo, 0);
    }
  }
}
| 4,532
| 35.264
| 120
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-f32-vcvt/gen/qs8-f32-vcvt-neon-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
// Converts QS8 (quantized signed 8-bit) elements to FP32 with NEON:
//   output[i] = (float) (input[i] - zero_point) * scale
// The main loop handles 8 elements per iteration; a trailing block handles
// the last 1-7 elements. XNN_OOB_READS: the final partial 8-byte load may
// read past the end of `input`; extra lanes are never stored.
//
// batch  - number of elements (in bytes; must be non-zero and a multiple of
//          sizeof(int8_t))
// input  - quantized int8 input, `batch` elements
// output - float output, `batch` elements
// params - conversion parameters (NEON layout: minus_zero_point, scale)
void xnn_qs8_f32_vcvt_ukernel__neon_x8(
    size_t batch,
    const int8_t* input,
    float* output,
    const union xnn_qs8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(int8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // minus_zero_point is stored as one 32-bit word holding two identical
  // int16 halves; duplicate it across the register and view it as 8x int16.
  const int16x8_t vminus_zero_point = vreinterpretq_s16_u32(vld1q_dup_u32((const void*) params->neon.minus_zero_point));
  // Fixed: '&params' had been corrupted to '¶ms' (HTML-entity mangling).
  const float32x4_t vscale = vld1q_dup_f32(&params->neon.scale);

  // Main loop: 8 elements per iteration. vaddw_s8 subtracts the zero point
  // while promoting int8 -> int16; vmovl_s16 promotes int16 -> int32.
  for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
    const int8x8_t vx = vld1_s8(input); input += 8;
    const int16x8_t vhx = vaddw_s8(vminus_zero_point, vx);
    const int32x4_t vwx_lo = vmovl_s16(vget_low_s16(vhx));
    const int32x4_t vwx_hi = vmovl_s16(vget_high_s16(vhx));
    float32x4_t vy_lo = vcvtq_f32_s32(vwx_lo);
    float32x4_t vy_hi = vcvtq_f32_s32(vwx_hi);
    vy_lo = vmulq_f32(vy_lo, vscale);
    vy_hi = vmulq_f32(vy_hi, vscale);
    vst1q_f32(output, vy_lo); output += 4;
    vst1q_f32(output, vy_hi); output += 4;
  }
  // Tail: 1-7 remaining elements. The 8-byte load may read out of bounds
  // (permitted by XNN_OOB_READS); only `batch` results are stored, selected
  // by the 4/2/1 bits of `batch`.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(int8_t))
;
    assert(batch <= 7 * sizeof(int8_t));
    const int8x8_t vx = vld1_s8(input);
    const int16x8_t vhx = vaddw_s8(vminus_zero_point, vx);
    const int32x4_t vwx_lo = vmovl_s16(vget_low_s16(vhx));
    const int32x4_t vwx_hi = vmovl_s16(vget_high_s16(vhx));
    float32x4_t vy = vcvtq_f32_s32(vwx_lo);
    vy = vmulq_f32(vy, vscale);
    if (batch & (4 * sizeof(int8_t))) {
      vst1q_f32(output, vy); output += 4;
      vy = vcvtq_f32_s32(vwx_hi);
      vy = vmulq_f32(vy, vscale);
    }
    float32x2_t vy_lo = vget_low_f32(vy);
    if (batch & (2 * sizeof(int8_t))) {
      vst1_f32(output, vy_lo); output += 2;
      vy_lo = vget_high_f32(vy);
    }
    if (batch & (1 * sizeof(int8_t))) {
      vst1_lane_f32(output, vy_lo, 0);
    }
  }
}
| 2,299
| 28.487179
| 120
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-f32-vcvt/gen/qs8-f32-vcvt-scalar-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
// Converts QS8 (quantized signed 8-bit) elements to FP32, one element per
// iteration:
//   output[i] = (float) (input[i] - zero_point) * scale
//
// batch  - number of elements (in bytes; must be non-zero and a multiple of
//          sizeof(int8_t))
// input  - quantized int8 input, `batch` elements
// output - float output, `batch` elements
// params - conversion parameters (scalar layout: zero_point, scale)
void xnn_qs8_f32_vcvt_ukernel__scalar_x1(
    size_t batch,
    const int8_t* input,
    float* output,
    const union xnn_qs8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(int8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const int32_t vzero_point = params->scalar.zero_point;
  const float vscale = params->scalar.scale;

  // Dequantize element-by-element: widen to int32, subtract the zero point,
  // convert to float, and apply the scale.
  for (; batch != 0; batch -= sizeof(int8_t)) {
    const int32_t vacc = (int32_t) *input++ - vzero_point;
    *output++ = (float) vacc * vscale;
  }
}
| 958
| 21.833333
| 76
|
c
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.