repo
stringlengths 1
152
⌀ | file
stringlengths 14
221
| code
stringlengths 501
25k
| file_length
int64 501
25k
| avg_line_length
float64 20
99.5
| max_line_length
int64 21
134
| extension_type
stringclasses 2
values |
|---|---|---|---|---|---|---|
XNNPACK
|
XNNPACK-master/src/qu8-avgpool/qu8-avgpool-9x-minmax-fp32-neon-c8.c
|
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/avgpool.h>
#include <xnnpack/common.h>
// Unipass QU8 average-pooling microkernel for pooling windows of up to
// 9 elements, processing 8 channels per NEON pass.
//
// Accumulation is widened uint8 -> uint16 -> int32, then requantized through
// fp32 using the "magic bias" trick: after scaling, adding a large float bias
// places the rounded integer result in the low mantissa bits, so the float can
// be reinterpreted as an int and the bias (combined with the output zero
// point) subtracted with saturation.
//
// Fix vs. original text: the parameter loads contained the mis-encoded
// sequence "¶ms" (a garbling of "&params"), restored here.
void xnn_qu8_avgpool_minmax_fp32_ukernel_9x__neon_c8(
    size_t output_pixels,
    size_t kernel_elements,
    size_t channels,
    const uint8_t** input,
    size_t input_offset,
    const uint8_t* zero,
    uint8_t* output,
    size_t input_increment,
    size_t output_increment,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(output_pixels != 0);
  assert(kernel_elements != 0);
  assert(kernel_elements <= 9);
  assert(channels != 0);

  // Broadcast requantization parameters into vector registers.
  const int32x4_t vinit_bias = vld1q_dup_s32(&params->fp32_neon.init_bias);
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
  const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
  const uint8x8_t voutput_min = vld1_dup_u8(&params->fp32_neon.output_min);
  const uint8x8_t voutput_max = vld1_dup_u8(&params->fp32_neon.output_max);

  do {
    // Load the (up to) 9 row pointers for this output pixel.
    const uint8_t* i0 = input[0];
    assert(i0 != NULL);
    const uint8_t* i1 = input[1];
    const uint8_t* i2 = input[2];
    const uint8_t* i3 = input[3];
    const uint8_t* i4 = input[4];
    const uint8_t* i5 = input[5];
    const uint8_t* i6 = input[6];
    const uint8_t* i7 = input[7];
    const uint8_t* i8 = input[8];
    input = (const uint8_t**) ((uintptr_t) input + input_increment);
    // Rows past kernel_elements are redirected to the zero buffer so they
    // contribute nothing to the sum.
    if (kernel_elements < 2) {
      i1 = zero;
    }
    assert(i1 != NULL);
    if (kernel_elements <= 2) {
      i2 = zero;
    }
    assert(i2 != NULL);
    if (kernel_elements < 4) {
      i3 = zero;
    }
    assert(i3 != NULL);
    if (kernel_elements <= 4) {
      i4 = zero;
    }
    assert(i4 != NULL);
    if (kernel_elements < 6) {
      i5 = zero;
    }
    assert(i5 != NULL);
    if (kernel_elements <= 6) {
      i6 = zero;
    }
    assert(i6 != NULL);
    if (kernel_elements < 8) {
      i7 = zero;
    }
    assert(i7 != NULL);
    if (kernel_elements <= 8) {
      i8 = zero;
    }
    assert(i8 != NULL);
    // Apply input_offset only to real rows; the zero buffer stays as-is.
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
    }
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
    }
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
    }
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
    }
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
    }
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
    }
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
    }
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
    }
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const uint8_t*) ((uintptr_t) i8 + input_offset);
    }

    size_t c = channels;
    // Main loop: 8 channels per iteration.
    while (c >= 8) {
      const uint8x8_t vi0 = vld1_u8(i0); i0 += 8;
      const uint8x8_t vi1 = vld1_u8(i1); i1 += 8;
      const uint8x8_t vi2 = vld1_u8(i2); i2 += 8;
      const uint8x8_t vi3 = vld1_u8(i3); i3 += 8;
      const uint8x8_t vi4 = vld1_u8(i4); i4 += 8;
      const uint8x8_t vi5 = vld1_u8(i5); i5 += 8;
      const uint8x8_t vi6 = vld1_u8(i6); i6 += 8;
      const uint8x8_t vi7 = vld1_u8(i7); i7 += 8;
      const uint8x8_t vi8 = vld1_u8(i8); i8 += 8;

      // Tree-sum the 9 rows in uint16 (max 9*255 fits in 16 bits).
      const uint16x8_t vsum018 = vaddw_u8(vaddl_u8(vi0, vi1), vi8);
      const uint16x8_t vsum23 = vaddl_u8(vi2, vi3);
      const uint16x8_t vsum45 = vaddl_u8(vi4, vi5);
      const uint16x8_t vsum67 = vaddl_u8(vi6, vi7);
      const uint16x8_t vsum2345 = vaddq_u16(vsum23, vsum45);
      const uint16x8_t vsum01678 = vaddq_u16(vsum018, vsum67);
      const uint16x8_t vsum = vaddq_u16(vsum2345, vsum01678);

      // Widen to int32 with the init bias, then requantize via fp32.
      int32x4_t vacc_lo = vaddw_s16(vinit_bias, vreinterpret_s16_u16(vget_low_u16(vsum)));
      int32x4_t vacc_hi = vaddw_s16(vinit_bias, vreinterpret_s16_u16(vget_high_u16(vsum)));

      float32x4_t vfpacc_lo = vcvtq_f32_s32(vacc_lo);
      float32x4_t vfpacc_hi = vcvtq_f32_s32(vacc_hi);
      vfpacc_lo = vmulq_f32(vfpacc_lo, vscale);
      vfpacc_hi = vmulq_f32(vfpacc_hi, vscale);
      // Magic-bias rounding: add bias, reinterpret float bits as int,
      // saturating-subtract (bias - output_zero_point).
      vacc_lo = vreinterpretq_s32_f32(vaddq_f32(vfpacc_lo, vmagic_bias));
      vacc_hi = vreinterpretq_s32_f32(vaddq_f32(vfpacc_hi, vmagic_bias));
      vacc_lo = vqsubq_s32(vacc_lo, vmagic_bias_less_output_zero_point);
      vacc_hi = vqsubq_s32(vacc_hi, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
      int16x8_t vacc = vqmovn_high_s32(vqmovn_s32(vacc_lo), vacc_hi);
#else
      int16x8_t vacc = vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));
#endif
      // Narrow to uint8 with saturation and clamp to [output_min, output_max].
      uint8x8_t vout = vqmovun_s16(vacc);
      vout = vmax_u8(vout, voutput_min);
      vout = vmin_u8(vout, voutput_max);

      vst1_u8(output, vout); output += 8;
      c -= 8;
    }
    // Remainder: compute a full 8-channel vector (over-read is allowed per
    // XNN_OOB_READS) and store only the low 1-7 lanes.
    if (c != 0) {
      const uint8x8_t vi0 = vld1_u8(i0);
      const uint8x8_t vi1 = vld1_u8(i1);
      const uint8x8_t vi2 = vld1_u8(i2);
      const uint8x8_t vi3 = vld1_u8(i3);
      const uint8x8_t vi4 = vld1_u8(i4);
      const uint8x8_t vi5 = vld1_u8(i5);
      const uint8x8_t vi6 = vld1_u8(i6);
      const uint8x8_t vi7 = vld1_u8(i7);
      const uint8x8_t vi8 = vld1_u8(i8);

      const uint16x8_t vsum018 = vaddw_u8(vaddl_u8(vi0, vi1), vi8);
      const uint16x8_t vsum23 = vaddl_u8(vi2, vi3);
      const uint16x8_t vsum45 = vaddl_u8(vi4, vi5);
      const uint16x8_t vsum67 = vaddl_u8(vi6, vi7);
      const uint16x8_t vsum2345 = vaddq_u16(vsum23, vsum45);
      const uint16x8_t vsum01678 = vaddq_u16(vsum018, vsum67);
      const uint16x8_t vsum = vaddq_u16(vsum2345, vsum01678);

      int32x4_t vacc_lo = vaddw_s16(vinit_bias, vreinterpret_s16_u16(vget_low_u16(vsum)));
      int32x4_t vacc_hi = vaddw_s16(vinit_bias, vreinterpret_s16_u16(vget_high_u16(vsum)));

      float32x4_t vfpacc_lo = vcvtq_f32_s32(vacc_lo);
      float32x4_t vfpacc_hi = vcvtq_f32_s32(vacc_hi);
      vfpacc_lo = vmulq_f32(vfpacc_lo, vscale);
      vfpacc_hi = vmulq_f32(vfpacc_hi, vscale);
      vacc_lo = vreinterpretq_s32_f32(vaddq_f32(vfpacc_lo, vmagic_bias));
      vacc_hi = vreinterpretq_s32_f32(vaddq_f32(vfpacc_hi, vmagic_bias));
      vacc_lo = vqsubq_s32(vacc_lo, vmagic_bias_less_output_zero_point);
      vacc_hi = vqsubq_s32(vacc_hi, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
      int16x8_t vacc = vqmovn_high_s32(vqmovn_s32(vacc_lo), vacc_hi);
#else
      int16x8_t vacc = vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));
#endif
      uint8x8_t vout = vqmovun_s16(vacc);
      vout = vmax_u8(vout, voutput_min);
      vout = vmin_u8(vout, voutput_max);

      // Store 4 / 2 / 1 trailing bytes, rotating the vector after each store.
      if (c & 4) {
        vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout), 0); output += 4;
        vout = vext_u8(vout, vout, 4);
      }
      if (c & 2) {
        vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout), 0); output += 2;
        vout = vext_u8(vout, vout, 2);
      }
      if (c & 1) {
        vst1_lane_u8(output, vout, 0); output += 1;
      }
    }
    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_pixels != 0);
}
| 7,627
| 33.206278
| 123
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-avgpool/qu8-avgpool-9x-minmax-fp32-scalar-imagic-c1.c
|
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/avgpool.h>
#include <xnnpack/math.h>
// Unipass QU8 average-pooling microkernel for windows of up to 9 elements.
// Pure scalar implementation, one channel per inner-loop iteration.
//
// Requantization uses the fp32 "imagic" scheme: the scaled accumulator is
// pushed into the float mantissa by adding magic_bias, the float bits are
// reinterpreted as an integer, clamped against magic_min/magic_max in the
// integer domain, and rebased by magic_bias_less_zero_point.
void xnn_qu8_avgpool_minmax_fp32_ukernel_9x__scalar_imagic_c1(
    size_t output_pixels,
    size_t kernel_elements,
    size_t channels,
    const uint8_t** input,
    size_t input_offset,
    const uint8_t* zero,
    uint8_t* output,
    size_t input_increment,
    size_t output_increment,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(output_pixels != 0);
  assert(kernel_elements != 0);
  assert(kernel_elements <= 9);
  assert(channels != 0);

  // Requantization constants.
  const int32_t init_bias = params->fp32_scalar_imagic.init_bias;
  const float scale = params->fp32_scalar_imagic.scale;
  const float magic_bias = params->fp32_scalar_imagic.magic_bias;
  const int32_t magic_min = params->fp32_scalar_imagic.magic_min;
  const int32_t magic_max = params->fp32_scalar_imagic.magic_max;
  const int32_t magic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;

  do {
    // Gather the (up to) 9 input rows for this output pixel; rows beyond
    // kernel_elements point at the zero buffer and contribute nothing.
    const uint8_t* r0 = input[0];
    assert(r0 != NULL);
    const uint8_t* r1 = kernel_elements < 2 ? zero : input[1];
    assert(r1 != NULL);
    const uint8_t* r2 = kernel_elements <= 2 ? zero : input[2];
    assert(r2 != NULL);
    const uint8_t* r3 = kernel_elements < 4 ? zero : input[3];
    assert(r3 != NULL);
    const uint8_t* r4 = kernel_elements <= 4 ? zero : input[4];
    assert(r4 != NULL);
    const uint8_t* r5 = kernel_elements < 6 ? zero : input[5];
    assert(r5 != NULL);
    const uint8_t* r6 = kernel_elements <= 6 ? zero : input[6];
    assert(r6 != NULL);
    const uint8_t* r7 = kernel_elements < 8 ? zero : input[7];
    assert(r7 != NULL);
    const uint8_t* r8 = kernel_elements <= 8 ? zero : input[8];
    assert(r8 != NULL);
    input = (const uint8_t**) ((uintptr_t) input + input_increment);

    // Apply input_offset only to real rows; the zero buffer is used as-is.
    if XNN_UNPREDICTABLE(r0 != zero) {
      r0 = (const uint8_t*) ((uintptr_t) r0 + input_offset);
    }
    if XNN_UNPREDICTABLE(r1 != zero) {
      r1 = (const uint8_t*) ((uintptr_t) r1 + input_offset);
    }
    if XNN_UNPREDICTABLE(r2 != zero) {
      r2 = (const uint8_t*) ((uintptr_t) r2 + input_offset);
    }
    if XNN_UNPREDICTABLE(r3 != zero) {
      r3 = (const uint8_t*) ((uintptr_t) r3 + input_offset);
    }
    if XNN_UNPREDICTABLE(r4 != zero) {
      r4 = (const uint8_t*) ((uintptr_t) r4 + input_offset);
    }
    if XNN_UNPREDICTABLE(r5 != zero) {
      r5 = (const uint8_t*) ((uintptr_t) r5 + input_offset);
    }
    if XNN_UNPREDICTABLE(r6 != zero) {
      r6 = (const uint8_t*) ((uintptr_t) r6 + input_offset);
    }
    if XNN_UNPREDICTABLE(r7 != zero) {
      r7 = (const uint8_t*) ((uintptr_t) r7 + input_offset);
    }
    if XNN_UNPREDICTABLE(r8 != zero) {
      r8 = (const uint8_t*) ((uintptr_t) r8 + input_offset);
    }

    size_t c = channels;
    do {
      // Sum the 9 taps on top of the init bias.
      int32_t acc = init_bias;
      acc += (int32_t) (uint32_t) *r0++;
      acc += (int32_t) (uint32_t) *r1++;
      acc += (int32_t) (uint32_t) *r2++;
      acc += (int32_t) (uint32_t) *r3++;
      acc += (int32_t) (uint32_t) *r4++;
      acc += (int32_t) (uint32_t) *r5++;
      acc += (int32_t) (uint32_t) *r6++;
      acc += (int32_t) (uint32_t) *r7++;
      acc += (int32_t) (uint32_t) *r8++;

      // Scale in fp32, then add the magic bias so the rounded result lands
      // in the low mantissa bits.
      float acc_f = (float) acc * scale;
      acc_f += magic_bias;

      // Clamp in the integer domain and remove the magic bias together with
      // the output zero point.
      int32_t out = (int32_t) float_as_uint32(acc_f);
      out = math_max_s32(out, magic_min);
      out = math_min_s32(out, magic_max);
      out -= magic_bias_less_zero_point;

      *output++ = (uint8_t) out;
    } while (--c != 0);

    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_pixels != 0);
}
| 4,384
| 29.241379
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-avgpool/qu8-avgpool-9x-minmax-fp32-sse2-c8.c
|
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/avgpool.h>
#include <xnnpack/unaligned.h>
// Unipass QU8 average-pooling microkernel for windows of up to 9 elements,
// processing 8 channels per SSE2 pass. Bytes are zero-extended to uint16,
// tree-summed, widened to int32 with the init bias, requantized through fp32
// (scale, clamp against output_max, round via cvtps), then the output zero
// point is added with int16 saturation and the result packed to uint8.
void xnn_qu8_avgpool_minmax_fp32_ukernel_9x__sse2_c8(
    size_t output_pixels,
    size_t kernel_elements,
    size_t channels,
    const uint8_t** input,
    size_t input_offset,
    const uint8_t* zero,
    uint8_t* output,
    size_t input_increment,
    size_t output_increment,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(output_pixels != 0);
  assert(kernel_elements != 0);
  assert(kernel_elements <= 9);
  assert(channels != 0);

  // Requantization parameters (pre-broadcast in the params struct).
  const __m128i vzero = _mm_setzero_si128();
  const __m128i vinit_bias = _mm_load_si128((const __m128i*) params->fp32_sse2.init_bias);
  const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
  const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
  const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
  const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);

  do {
    // Load the (up to) 9 row pointers for this output pixel.
    const uint8_t* i0 = input[0];
    assert(i0 != NULL);
    const uint8_t* i1 = input[1];
    const uint8_t* i2 = input[2];
    const uint8_t* i3 = input[3];
    const uint8_t* i4 = input[4];
    const uint8_t* i5 = input[5];
    const uint8_t* i6 = input[6];
    const uint8_t* i7 = input[7];
    const uint8_t* i8 = input[8];
    input = (const uint8_t**) ((uintptr_t) input + input_increment);
    // Rows past kernel_elements are redirected to the zero buffer so they
    // contribute nothing to the sum.
    if (kernel_elements < 2) {
      i1 = zero;
    }
    assert(i1 != NULL);
    if (kernel_elements <= 2) {
      i2 = zero;
    }
    assert(i2 != NULL);
    if (kernel_elements < 4) {
      i3 = zero;
    }
    assert(i3 != NULL);
    if (kernel_elements <= 4) {
      i4 = zero;
    }
    assert(i4 != NULL);
    if (kernel_elements < 6) {
      i5 = zero;
    }
    assert(i5 != NULL);
    if (kernel_elements <= 6) {
      i6 = zero;
    }
    assert(i6 != NULL);
    if (kernel_elements < 8) {
      i7 = zero;
    }
    assert(i7 != NULL);
    if (kernel_elements <= 8) {
      i8 = zero;
    }
    assert(i8 != NULL);
    // Apply input_offset only to real rows; the zero buffer stays as-is.
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
    }
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
    }
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
    }
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
    }
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
    }
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
    }
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
    }
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
    }
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const uint8_t*) ((uintptr_t) i8 + input_offset);
    }

    size_t c = channels;
    // Main loop: 8 channels per iteration.
    while (c >= 8) {
      const __m128i vi0 = _mm_loadl_epi64((const __m128i*) i0); i0 += 8;
      const __m128i vi1 = _mm_loadl_epi64((const __m128i*) i1); i1 += 8;
      const __m128i vi2 = _mm_loadl_epi64((const __m128i*) i2); i2 += 8;
      const __m128i vi3 = _mm_loadl_epi64((const __m128i*) i3); i3 += 8;
      const __m128i vi4 = _mm_loadl_epi64((const __m128i*) i4); i4 += 8;
      const __m128i vi5 = _mm_loadl_epi64((const __m128i*) i5); i5 += 8;
      const __m128i vi6 = _mm_loadl_epi64((const __m128i*) i6); i6 += 8;
      const __m128i vi7 = _mm_loadl_epi64((const __m128i*) i7); i7 += 8;
      const __m128i vi8 = _mm_loadl_epi64((const __m128i*) i8); i8 += 8;

      // Zero-extend uint8 -> uint16.
      const __m128i vxi0 = _mm_unpacklo_epi8(vi0, vzero);
      const __m128i vxi1 = _mm_unpacklo_epi8(vi1, vzero);
      const __m128i vxi2 = _mm_unpacklo_epi8(vi2, vzero);
      const __m128i vxi3 = _mm_unpacklo_epi8(vi3, vzero);
      const __m128i vxi4 = _mm_unpacklo_epi8(vi4, vzero);
      const __m128i vxi5 = _mm_unpacklo_epi8(vi5, vzero);
      const __m128i vxi6 = _mm_unpacklo_epi8(vi6, vzero);
      const __m128i vxi7 = _mm_unpacklo_epi8(vi7, vzero);
      const __m128i vxi8 = _mm_unpacklo_epi8(vi8, vzero);

      // Tree-sum the 9 rows in uint16 (max 9*255 fits without overflow).
      const __m128i vsum018 = _mm_add_epi16(_mm_add_epi16(vxi0, vxi1), vxi8);
      const __m128i vsum23 = _mm_add_epi16(vxi2, vxi3);
      const __m128i vsum45 = _mm_add_epi16(vxi4, vxi5);
      const __m128i vsum67 = _mm_add_epi16(vxi6, vxi7);
      const __m128i vsum2345 = _mm_add_epi16(vsum23, vsum45);
      const __m128i vsum01678 = _mm_add_epi16(vsum018, vsum67);
      const __m128i vsum = _mm_add_epi16(vsum2345, vsum01678);

      // Widen to int32 with the init bias, then requantize via fp32.
      __m128i vacc_lo = _mm_add_epi32(vinit_bias, _mm_unpacklo_epi16(vsum, vzero));
      __m128i vacc_hi = _mm_add_epi32(vinit_bias, _mm_unpackhi_epi16(vsum, vzero));

      __m128 vfpacc_lo = _mm_cvtepi32_ps(vacc_lo);
      __m128 vfpacc_hi = _mm_cvtepi32_ps(vacc_hi);
      vfpacc_lo = _mm_mul_ps(vfpacc_lo, vscale);
      vfpacc_hi = _mm_mul_ps(vfpacc_hi, vscale);
      // Upper clamp in float; lower clamp happens after packing (max_epu8).
      vfpacc_lo = _mm_min_ps(vfpacc_lo, voutput_max_less_zero_point);
      vfpacc_hi = _mm_min_ps(vfpacc_hi, voutput_max_less_zero_point);
      vacc_lo = _mm_cvtps_epi32(vfpacc_lo);
      vacc_hi = _mm_cvtps_epi32(vfpacc_hi);

      // Add the output zero point with int16 saturation, pack to uint8,
      // and apply the lower bound.
      __m128i vout = _mm_adds_epi16(_mm_packs_epi32(vacc_lo, vacc_hi), voutput_zero_point);
      vout = _mm_packus_epi16(vout, vout);
      vout = _mm_max_epu8(vout, voutput_min);

      _mm_storel_epi64((__m128i*) output, vout);
      output += 8;
      c -= 8;
    }
    // Remainder: compute a full 8-channel vector (over-read allowed per
    // XNN_OOB_READS) and store only the low 1-7 bytes.
    if (c != 0) {
      const __m128i vi0 = _mm_loadl_epi64((const __m128i*) i0);
      const __m128i vi1 = _mm_loadl_epi64((const __m128i*) i1);
      const __m128i vi2 = _mm_loadl_epi64((const __m128i*) i2);
      const __m128i vi3 = _mm_loadl_epi64((const __m128i*) i3);
      const __m128i vi4 = _mm_loadl_epi64((const __m128i*) i4);
      const __m128i vi5 = _mm_loadl_epi64((const __m128i*) i5);
      const __m128i vi6 = _mm_loadl_epi64((const __m128i*) i6);
      const __m128i vi7 = _mm_loadl_epi64((const __m128i*) i7);
      const __m128i vi8 = _mm_loadl_epi64((const __m128i*) i8);

      const __m128i vxi0 = _mm_unpacklo_epi8(vi0, vzero);
      const __m128i vxi1 = _mm_unpacklo_epi8(vi1, vzero);
      const __m128i vxi2 = _mm_unpacklo_epi8(vi2, vzero);
      const __m128i vxi3 = _mm_unpacklo_epi8(vi3, vzero);
      const __m128i vxi4 = _mm_unpacklo_epi8(vi4, vzero);
      const __m128i vxi5 = _mm_unpacklo_epi8(vi5, vzero);
      const __m128i vxi6 = _mm_unpacklo_epi8(vi6, vzero);
      const __m128i vxi7 = _mm_unpacklo_epi8(vi7, vzero);
      const __m128i vxi8 = _mm_unpacklo_epi8(vi8, vzero);

      const __m128i vsum018 = _mm_add_epi16(_mm_add_epi16(vxi0, vxi1), vxi8);
      const __m128i vsum23 = _mm_add_epi16(vxi2, vxi3);
      const __m128i vsum45 = _mm_add_epi16(vxi4, vxi5);
      const __m128i vsum67 = _mm_add_epi16(vxi6, vxi7);
      const __m128i vsum2345 = _mm_add_epi16(vsum23, vsum45);
      const __m128i vsum01678 = _mm_add_epi16(vsum018, vsum67);
      const __m128i vsum = _mm_add_epi16(vsum2345, vsum01678);

      __m128i vacc_lo = _mm_add_epi32(vinit_bias, _mm_unpacklo_epi16(vsum, vzero));
      __m128i vacc_hi = _mm_add_epi32(vinit_bias, _mm_unpackhi_epi16(vsum, vzero));

      __m128 vfpacc_lo = _mm_cvtepi32_ps(vacc_lo);
      __m128 vfpacc_hi = _mm_cvtepi32_ps(vacc_hi);
      vfpacc_lo = _mm_mul_ps(vfpacc_lo, vscale);
      vfpacc_hi = _mm_mul_ps(vfpacc_hi, vscale);
      vfpacc_lo = _mm_min_ps(vfpacc_lo, voutput_max_less_zero_point);
      vfpacc_hi = _mm_min_ps(vfpacc_hi, voutput_max_less_zero_point);
      vacc_lo = _mm_cvtps_epi32(vfpacc_lo);
      vacc_hi = _mm_cvtps_epi32(vfpacc_hi);

      __m128i vout = _mm_adds_epi16(_mm_packs_epi32(vacc_lo, vacc_hi), voutput_zero_point);
      vout = _mm_packus_epi16(vout, vout);
      vout = _mm_max_epu8(vout, voutput_min);

      // Store 4 / 2 / 1 trailing bytes, shifting consumed lanes out.
      if (c & 4) {
        unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout));
        output += 4;
        vout = _mm_srli_epi64(vout, 32);
      }
      if (c & 2) {
        unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout, 0));
        output += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (c & 1) {
        *output = (uint8_t) _mm_cvtsi128_si32(vout);
        output += 1;
      }
    }
    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_pixels != 0);
}
| 8,756
| 36.26383
| 106
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-25p1c-minmax-fp32-scalar-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// Unipass QU8 depthwise convolution microkernel: 25-tap (e.g. 5x5) kernel,
// 1 channel per inner-loop iteration, scalar arithmetic, fp32 "fmagic"
// requantization (clamp in float, then round via the magic-bias trick).
// NOTE: auto-generated from src/qs8-dwconv/unipass-scalar.c.in — keep logic
// changes in the template, not here.
void xnn_qu8_dwconv_minmax_fp32_ukernel_25p1c__scalar_fmagic(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);

  // Requantization constants; min/max are pre-shifted by the output zero
  // point so clamping can happen in float before the bias is applied.
  const float vscale = params->fp32_scalar_fmagic.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
  const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
  const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
  const int32_t vkernel_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;

  do {
    // Load the 25 input-row pointers for this output pixel; real rows are
    // biased by input_offset, the shared zero buffer is used as-is.
    const uint8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
    }
    const uint8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
    }
    const uint8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
    }
    const uint8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
    }
    const uint8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
    }
    const uint8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
    }
    const uint8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
    }
    const uint8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
    }
    const uint8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const uint8_t*) ((uintptr_t) i8 + input_offset);
    }
    const uint8_t* i9 = input[9];
    assert(i9 != NULL);
    if XNN_UNPREDICTABLE(i9 != zero) {
      i9 = (const uint8_t*) ((uintptr_t) i9 + input_offset);
    }
    const uint8_t* i10 = input[10];
    assert(i10 != NULL);
    if XNN_UNPREDICTABLE(i10 != zero) {
      i10 = (const uint8_t*) ((uintptr_t) i10 + input_offset);
    }
    const uint8_t* i11 = input[11];
    assert(i11 != NULL);
    if XNN_UNPREDICTABLE(i11 != zero) {
      i11 = (const uint8_t*) ((uintptr_t) i11 + input_offset);
    }
    const uint8_t* i12 = input[12];
    assert(i12 != NULL);
    if XNN_UNPREDICTABLE(i12 != zero) {
      i12 = (const uint8_t*) ((uintptr_t) i12 + input_offset);
    }
    const uint8_t* i13 = input[13];
    assert(i13 != NULL);
    if XNN_UNPREDICTABLE(i13 != zero) {
      i13 = (const uint8_t*) ((uintptr_t) i13 + input_offset);
    }
    const uint8_t* i14 = input[14];
    assert(i14 != NULL);
    if XNN_UNPREDICTABLE(i14 != zero) {
      i14 = (const uint8_t*) ((uintptr_t) i14 + input_offset);
    }
    const uint8_t* i15 = input[15];
    assert(i15 != NULL);
    if XNN_UNPREDICTABLE(i15 != zero) {
      i15 = (const uint8_t*) ((uintptr_t) i15 + input_offset);
    }
    const uint8_t* i16 = input[16];
    assert(i16 != NULL);
    if XNN_UNPREDICTABLE(i16 != zero) {
      i16 = (const uint8_t*) ((uintptr_t) i16 + input_offset);
    }
    const uint8_t* i17 = input[17];
    assert(i17 != NULL);
    if XNN_UNPREDICTABLE(i17 != zero) {
      i17 = (const uint8_t*) ((uintptr_t) i17 + input_offset);
    }
    const uint8_t* i18 = input[18];
    assert(i18 != NULL);
    if XNN_UNPREDICTABLE(i18 != zero) {
      i18 = (const uint8_t*) ((uintptr_t) i18 + input_offset);
    }
    const uint8_t* i19 = input[19];
    assert(i19 != NULL);
    if XNN_UNPREDICTABLE(i19 != zero) {
      i19 = (const uint8_t*) ((uintptr_t) i19 + input_offset);
    }
    const uint8_t* i20 = input[20];
    assert(i20 != NULL);
    if XNN_UNPREDICTABLE(i20 != zero) {
      i20 = (const uint8_t*) ((uintptr_t) i20 + input_offset);
    }
    const uint8_t* i21 = input[21];
    assert(i21 != NULL);
    if XNN_UNPREDICTABLE(i21 != zero) {
      i21 = (const uint8_t*) ((uintptr_t) i21 + input_offset);
    }
    const uint8_t* i22 = input[22];
    assert(i22 != NULL);
    if XNN_UNPREDICTABLE(i22 != zero) {
      i22 = (const uint8_t*) ((uintptr_t) i22 + input_offset);
    }
    const uint8_t* i23 = input[23];
    assert(i23 != NULL);
    if XNN_UNPREDICTABLE(i23 != zero) {
      i23 = (const uint8_t*) ((uintptr_t) i23 + input_offset);
    }
    const uint8_t* i24 = input[24];
    assert(i24 != NULL);
    if XNN_UNPREDICTABLE(i24 != zero) {
      i24 = (const uint8_t*) ((uintptr_t) i24 + input_offset);
    }
    input = (const uint8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
    do {
      // Per-channel weight layout: one int32 bias followed by 25 uint8 taps.
      int32_t vacc = unaligned_load_s32(w);

      // Multiply-accumulate each input tap against its (zero-point-adjusted)
      // kernel weight.
      const int32_t vi0 = (int32_t) (uint32_t) *i0++;
      const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[0] - vkernel_zero_point;
      vacc += vi0 * vk0;
      const int32_t vi1 = (int32_t) (uint32_t) *i1++;
      const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[1] - vkernel_zero_point;
      vacc += vi1 * vk1;
      const int32_t vi2 = (int32_t) (uint32_t) *i2++;
      const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[2] - vkernel_zero_point;
      vacc += vi2 * vk2;
      const int32_t vi3 = (int32_t) (uint32_t) *i3++;
      const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[3] - vkernel_zero_point;
      vacc += vi3 * vk3;
      const int32_t vi4 = (int32_t) (uint32_t) *i4++;
      const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[4] - vkernel_zero_point;
      vacc += vi4 * vk4;
      const int32_t vi5 = (int32_t) (uint32_t) *i5++;
      const int32_t vk5 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[5] - vkernel_zero_point;
      vacc += vi5 * vk5;
      const int32_t vi6 = (int32_t) (uint32_t) *i6++;
      const int32_t vk6 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[6] - vkernel_zero_point;
      vacc += vi6 * vk6;
      const int32_t vi7 = (int32_t) (uint32_t) *i7++;
      const int32_t vk7 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[7] - vkernel_zero_point;
      vacc += vi7 * vk7;
      const int32_t vi8 = (int32_t) (uint32_t) *i8++;
      const int32_t vk8 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[8] - vkernel_zero_point;
      vacc += vi8 * vk8;
      const int32_t vi9 = (int32_t) (uint32_t) *i9++;
      const int32_t vk9 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[9] - vkernel_zero_point;
      vacc += vi9 * vk9;
      const int32_t vi10 = (int32_t) (uint32_t) *i10++;
      const int32_t vk10 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[10] - vkernel_zero_point;
      vacc += vi10 * vk10;
      const int32_t vi11 = (int32_t) (uint32_t) *i11++;
      const int32_t vk11 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[11] - vkernel_zero_point;
      vacc += vi11 * vk11;
      const int32_t vi12 = (int32_t) (uint32_t) *i12++;
      const int32_t vk12 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[12] - vkernel_zero_point;
      vacc += vi12 * vk12;
      const int32_t vi13 = (int32_t) (uint32_t) *i13++;
      const int32_t vk13 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[13] - vkernel_zero_point;
      vacc += vi13 * vk13;
      const int32_t vi14 = (int32_t) (uint32_t) *i14++;
      const int32_t vk14 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[14] - vkernel_zero_point;
      vacc += vi14 * vk14;
      const int32_t vi15 = (int32_t) (uint32_t) *i15++;
      const int32_t vk15 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[15] - vkernel_zero_point;
      vacc += vi15 * vk15;
      const int32_t vi16 = (int32_t) (uint32_t) *i16++;
      const int32_t vk16 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[16] - vkernel_zero_point;
      vacc += vi16 * vk16;
      const int32_t vi17 = (int32_t) (uint32_t) *i17++;
      const int32_t vk17 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[17] - vkernel_zero_point;
      vacc += vi17 * vk17;
      const int32_t vi18 = (int32_t) (uint32_t) *i18++;
      const int32_t vk18 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[18] - vkernel_zero_point;
      vacc += vi18 * vk18;
      const int32_t vi19 = (int32_t) (uint32_t) *i19++;
      const int32_t vk19 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[19] - vkernel_zero_point;
      vacc += vi19 * vk19;
      const int32_t vi20 = (int32_t) (uint32_t) *i20++;
      const int32_t vk20 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[20] - vkernel_zero_point;
      vacc += vi20 * vk20;
      const int32_t vi21 = (int32_t) (uint32_t) *i21++;
      const int32_t vk21 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[21] - vkernel_zero_point;
      vacc += vi21 * vk21;
      const int32_t vi22 = (int32_t) (uint32_t) *i22++;
      const int32_t vk22 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[22] - vkernel_zero_point;
      vacc += vi22 * vk22;
      const int32_t vi23 = (int32_t) (uint32_t) *i23++;
      const int32_t vk23 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[23] - vkernel_zero_point;
      vacc += vi23 * vk23;
      const int32_t vi24 = (int32_t) (uint32_t) *i24++;
      const int32_t vk24 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[24] - vkernel_zero_point;
      vacc += vi24 * vk24;

      // Advance to the next channel's weights (bias + 25 taps).
      w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 25 * sizeof(uint8_t));

      // fp32 requantization: scale, clamp (bounds pre-shifted by the output
      // zero point), then round by adding the magic bias, reinterpreting the
      // float bits as int, and subtracting (bias - output_zero_point).
      float vfpacc = (float) vacc * vscale;
      vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
      vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
      vfpacc += vmagic_bias;
      int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;

      *output++ = (uint8_t) vout;
    } while (--c != 0);

    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 11,441
| 42.671756
| 126
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-25p1c-minmax-fp32-scalar-imagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_dwconv_minmax_fp32_ukernel_25p1c__scalar_imagic(
size_t channels,
size_t output_width,
const uint8_t** input,
const void* weights,
uint8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float vscale = params->fp32_scalar_imagic.scale;
const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
const int32_t vkernel_zero_point = params->fp32_scalar_imagic.kernel_zero_point;
do {
const uint8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
const uint8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
}
const uint8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
}
const uint8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
}
const uint8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const uint8_t*) ((uintptr_t) i8 + input_offset);
}
const uint8_t* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const uint8_t*) ((uintptr_t) i9 + input_offset);
}
const uint8_t* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const uint8_t*) ((uintptr_t) i10 + input_offset);
}
const uint8_t* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const uint8_t*) ((uintptr_t) i11 + input_offset);
}
const uint8_t* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const uint8_t*) ((uintptr_t) i12 + input_offset);
}
const uint8_t* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const uint8_t*) ((uintptr_t) i13 + input_offset);
}
const uint8_t* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const uint8_t*) ((uintptr_t) i14 + input_offset);
}
const uint8_t* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const uint8_t*) ((uintptr_t) i15 + input_offset);
}
const uint8_t* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const uint8_t*) ((uintptr_t) i16 + input_offset);
}
const uint8_t* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const uint8_t*) ((uintptr_t) i17 + input_offset);
}
const uint8_t* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const uint8_t*) ((uintptr_t) i18 + input_offset);
}
const uint8_t* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const uint8_t*) ((uintptr_t) i19 + input_offset);
}
const uint8_t* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const uint8_t*) ((uintptr_t) i20 + input_offset);
}
const uint8_t* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const uint8_t*) ((uintptr_t) i21 + input_offset);
}
const uint8_t* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const uint8_t*) ((uintptr_t) i22 + input_offset);
}
const uint8_t* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const uint8_t*) ((uintptr_t) i23 + input_offset);
}
const uint8_t* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const uint8_t*) ((uintptr_t) i24 + input_offset);
}
input = (const uint8_t**) ((uintptr_t) input + input_stride);
size_t c = channels;
const void* w = weights;
do {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) (uint32_t) *i0++;
const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[0] - vkernel_zero_point;
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) (uint32_t) *i1++;
const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[1] - vkernel_zero_point;
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) (uint32_t) *i2++;
const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[2] - vkernel_zero_point;
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) (uint32_t) *i3++;
const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[3] - vkernel_zero_point;
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) (uint32_t) *i4++;
const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[4] - vkernel_zero_point;
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) (uint32_t) *i5++;
const int32_t vk5 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[5] - vkernel_zero_point;
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) (uint32_t) *i6++;
const int32_t vk6 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[6] - vkernel_zero_point;
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) (uint32_t) *i7++;
const int32_t vk7 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[7] - vkernel_zero_point;
vacc += vi7 * vk7;
const int32_t vi8 = (int32_t) (uint32_t) *i8++;
const int32_t vk8 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[8] - vkernel_zero_point;
vacc += vi8 * vk8;
const int32_t vi9 = (int32_t) (uint32_t) *i9++;
const int32_t vk9 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[9] - vkernel_zero_point;
vacc += vi9 * vk9;
const int32_t vi10 = (int32_t) (uint32_t) *i10++;
const int32_t vk10 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[10] - vkernel_zero_point;
vacc += vi10 * vk10;
const int32_t vi11 = (int32_t) (uint32_t) *i11++;
const int32_t vk11 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[11] - vkernel_zero_point;
vacc += vi11 * vk11;
const int32_t vi12 = (int32_t) (uint32_t) *i12++;
const int32_t vk12 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[12] - vkernel_zero_point;
vacc += vi12 * vk12;
const int32_t vi13 = (int32_t) (uint32_t) *i13++;
const int32_t vk13 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[13] - vkernel_zero_point;
vacc += vi13 * vk13;
const int32_t vi14 = (int32_t) (uint32_t) *i14++;
const int32_t vk14 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[14] - vkernel_zero_point;
vacc += vi14 * vk14;
const int32_t vi15 = (int32_t) (uint32_t) *i15++;
const int32_t vk15 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[15] - vkernel_zero_point;
vacc += vi15 * vk15;
const int32_t vi16 = (int32_t) (uint32_t) *i16++;
const int32_t vk16 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[16] - vkernel_zero_point;
vacc += vi16 * vk16;
const int32_t vi17 = (int32_t) (uint32_t) *i17++;
const int32_t vk17 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[17] - vkernel_zero_point;
vacc += vi17 * vk17;
const int32_t vi18 = (int32_t) (uint32_t) *i18++;
const int32_t vk18 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[18] - vkernel_zero_point;
vacc += vi18 * vk18;
const int32_t vi19 = (int32_t) (uint32_t) *i19++;
const int32_t vk19 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[19] - vkernel_zero_point;
vacc += vi19 * vk19;
const int32_t vi20 = (int32_t) (uint32_t) *i20++;
const int32_t vk20 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[20] - vkernel_zero_point;
vacc += vi20 * vk20;
const int32_t vi21 = (int32_t) (uint32_t) *i21++;
const int32_t vk21 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[21] - vkernel_zero_point;
vacc += vi21 * vk21;
const int32_t vi22 = (int32_t) (uint32_t) *i22++;
const int32_t vk22 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[22] - vkernel_zero_point;
vacc += vi22 * vk22;
const int32_t vi23 = (int32_t) (uint32_t) *i23++;
const int32_t vk23 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[23] - vkernel_zero_point;
vacc += vi23 * vk23;
const int32_t vi24 = (int32_t) (uint32_t) *i24++;
const int32_t vk24 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[24] - vkernel_zero_point;
vacc += vi24 * vk24;
w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 25 * sizeof(uint8_t));
float vfpacc = (float) vacc * vscale;
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc);
vout = math_max_s32(vout, vmagic_min);
vout = math_min_s32(vout, vmagic_max);
vout -= vmagic_bias_less_zero_point;
*output++ = (uint8_t) vout;
} while (--c != 0);
output = (uint8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 11,327
| 42.072243
| 126
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-25p1c-minmax-fp32-scalar-lrintf.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_dwconv_minmax_fp32_ukernel_25p1c__scalar_lrintf(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);

  // Requantization constants: the int32 accumulator is scaled to float,
  // clamped against the (zero-point-adjusted) output bounds, rounded with
  // lrintf, and shifted back by the output zero point.
  const float vscale = params->fp32_scalar_lrintf.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
  const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
  const int32_t vkernel_zero_point = params->fp32_scalar_lrintf.kernel_zero_point;
  do {
    // Collect the 25 per-tap input pointers for this output pixel.
    // The shared padding row ("zero") must not be shifted by input_offset.
    const uint8_t* itap[25];
    for (size_t k = 0; k < 25; k++) {
      itap[k] = input[k];
      assert(itap[k] != NULL);
      if XNN_UNPREDICTABLE(itap[k] != zero) {
        itap[k] = (const uint8_t*) ((uintptr_t) itap[k] + input_offset);
      }
    }
    input = (const uint8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
    do {
      // Per-channel weight layout: one int32 bias followed by 25 uint8 taps.
      int32_t vacc = unaligned_load_s32(w);
      const uint8_t* wtap = (const uint8_t*) ((uintptr_t) w + sizeof(int32_t));
      // Multiply-accumulate all 25 taps; kernel values are recentered by the
      // kernel zero point, input values are used as-is (handled via bias).
      for (size_t k = 0; k < 25; k++) {
        const int32_t vi = (int32_t) (uint32_t) *itap[k]++;
        const int32_t vk = (int32_t) (uint32_t) wtap[k] - vkernel_zero_point;
        vacc += vi * vk;
      }
      w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 25 * sizeof(uint8_t));

      // fp32 requantization with lrintf rounding (round-to-nearest-even in
      // the default FP environment).
      float vfpacc = (float) vacc * vscale;
      vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
      vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
      const int32_t vrndacc = (int32_t) lrintf(vfpacc);
      int32_t vout = vrndacc + voutput_zero_point;
      *output++ = (uint8_t) vout;
    } while (--c != 0);

    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 11,345
| 42.305344
| 126
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-25p1c-minmax-fp32-wasm-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_dwconv_minmax_fp32_ukernel_25p1c__wasm_fmagic(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);

  // Requantization constants for the "fmagic" scheme: scale to float, clamp,
  // add a magic bias so the integer result appears in the low mantissa bits,
  // then reinterpret as int32 and subtract the bias-less-zero-point.
  const float vscale = params->fp32_scalar_fmagic.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
  const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
  const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
  const int32_t vkernel_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
  do {
    // Collect the 25 per-tap input pointers for this output pixel.
    // The shared padding row ("zero") must not be shifted by input_offset.
    const uint8_t* itap[25];
    for (size_t k = 0; k < 25; k++) {
      itap[k] = input[k];
      assert(itap[k] != NULL);
      if XNN_UNPREDICTABLE(itap[k] != zero) {
        itap[k] = (const uint8_t*) ((uintptr_t) itap[k] + input_offset);
      }
    }
    input = (const uint8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
    do {
      // Per-channel weight layout: one int32 bias followed by 25 uint8 taps.
      int32_t vacc = unaligned_load_s32(w);
      const uint8_t* wtap = (const uint8_t*) ((uintptr_t) w + sizeof(int32_t));
      // Multiply-accumulate all 25 taps; kernel values are recentered by the
      // kernel zero point, input values are used as-is (handled via bias).
      for (size_t k = 0; k < 25; k++) {
        const int32_t vi = (int32_t) (uint32_t) *itap[k]++;
        const int32_t vk = (int32_t) (uint32_t) wtap[k] - vkernel_zero_point;
        vacc += vi * vk;
      }
      w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 25 * sizeof(uint8_t));

      // fp32 requantization using WAsm min/max intrinsics and the
      // magic-bias (float reinterpretation) rounding trick.
      float vfpacc = (float) vacc * vscale;
      vfpacc = __builtin_wasm_max_f32(vfpacc, voutput_min_less_zero_point);
      vfpacc = __builtin_wasm_min_f32(vfpacc, voutput_max_less_zero_point);
      vfpacc += vmagic_bias;
      int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
      *output++ = (uint8_t) vout;
    } while (--c != 0);

    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 11,459
| 42.740458
| 126
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-25p2c-minmax-fp32-scalar-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_dwconv_minmax_fp32_ukernel_25p2c__scalar_fmagic(
size_t channels,
size_t output_width,
const uint8_t** input,
const void* weights,
uint8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float vscale = params->fp32_scalar_fmagic.scale;
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
const int32_t vkernel_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
do {
const uint8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
const uint8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
}
const uint8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
}
const uint8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
}
const uint8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const uint8_t*) ((uintptr_t) i8 + input_offset);
}
const uint8_t* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const uint8_t*) ((uintptr_t) i9 + input_offset);
}
const uint8_t* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const uint8_t*) ((uintptr_t) i10 + input_offset);
}
const uint8_t* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const uint8_t*) ((uintptr_t) i11 + input_offset);
}
const uint8_t* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const uint8_t*) ((uintptr_t) i12 + input_offset);
}
const uint8_t* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const uint8_t*) ((uintptr_t) i13 + input_offset);
}
const uint8_t* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const uint8_t*) ((uintptr_t) i14 + input_offset);
}
const uint8_t* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const uint8_t*) ((uintptr_t) i15 + input_offset);
}
const uint8_t* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const uint8_t*) ((uintptr_t) i16 + input_offset);
}
const uint8_t* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const uint8_t*) ((uintptr_t) i17 + input_offset);
}
const uint8_t* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const uint8_t*) ((uintptr_t) i18 + input_offset);
}
const uint8_t* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const uint8_t*) ((uintptr_t) i19 + input_offset);
}
const uint8_t* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const uint8_t*) ((uintptr_t) i20 + input_offset);
}
const uint8_t* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const uint8_t*) ((uintptr_t) i21 + input_offset);
}
const uint8_t* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const uint8_t*) ((uintptr_t) i22 + input_offset);
}
const uint8_t* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const uint8_t*) ((uintptr_t) i23 + input_offset);
}
const uint8_t* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const uint8_t*) ((uintptr_t) i24 + input_offset);
}
input = (const uint8_t**) ((uintptr_t) input + input_stride);
size_t c = channels;
const void* w = weights;
for (; c >= 2; c -= 2) {
int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0] - vkernel_zero_point;
const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1] - vkernel_zero_point;
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2] - vkernel_zero_point;
const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3] - vkernel_zero_point;
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4] - vkernel_zero_point;
const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5] - vkernel_zero_point;
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6] - vkernel_zero_point;
const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7] - vkernel_zero_point;
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8] - vkernel_zero_point;
const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9] - vkernel_zero_point;
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) (uint32_t) i5[0];
const int32_t vi5x1 = (int32_t) (uint32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10] - vkernel_zero_point;
const int32_t vk5x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[11] - vkernel_zero_point;
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
const int32_t vi6x0 = (int32_t) (uint32_t) i6[0];
const int32_t vi6x1 = (int32_t) (uint32_t) i6[1];
i6 += 2;
const int32_t vk6x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12] - vkernel_zero_point;
const int32_t vk6x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[13] - vkernel_zero_point;
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
const int32_t vi7x0 = (int32_t) (uint32_t) i7[0];
const int32_t vi7x1 = (int32_t) (uint32_t) i7[1];
i7 += 2;
const int32_t vk7x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14] - vkernel_zero_point;
const int32_t vk7x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[15] - vkernel_zero_point;
vacc0 += vi7x0 * vk7x0;
vacc1 += vi7x1 * vk7x1;
const int32_t vi8x0 = (int32_t) (uint32_t) i8[0];
const int32_t vi8x1 = (int32_t) (uint32_t) i8[1];
i8 += 2;
const int32_t vk8x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[16] - vkernel_zero_point;
const int32_t vk8x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[17] - vkernel_zero_point;
vacc0 += vi8x0 * vk8x0;
vacc1 += vi8x1 * vk8x1;
const int32_t vi9x0 = (int32_t) (uint32_t) i9[0];
const int32_t vi9x1 = (int32_t) (uint32_t) i9[1];
i9 += 2;
const int32_t vk9x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[18] - vkernel_zero_point;
const int32_t vk9x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[19] - vkernel_zero_point;
vacc0 += vi9x0 * vk9x0;
vacc1 += vi9x1 * vk9x1;
const int32_t vi10x0 = (int32_t) (uint32_t) i10[0];
const int32_t vi10x1 = (int32_t) (uint32_t) i10[1];
i10 += 2;
const int32_t vk10x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[20] - vkernel_zero_point;
const int32_t vk10x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[21] - vkernel_zero_point;
vacc0 += vi10x0 * vk10x0;
vacc1 += vi10x1 * vk10x1;
const int32_t vi11x0 = (int32_t) (uint32_t) i11[0];
const int32_t vi11x1 = (int32_t) (uint32_t) i11[1];
i11 += 2;
const int32_t vk11x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[22] - vkernel_zero_point;
const int32_t vk11x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[23] - vkernel_zero_point;
vacc0 += vi11x0 * vk11x0;
vacc1 += vi11x1 * vk11x1;
const int32_t vi12x0 = (int32_t) (uint32_t) i12[0];
const int32_t vi12x1 = (int32_t) (uint32_t) i12[1];
i12 += 2;
const int32_t vk12x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[24] - vkernel_zero_point;
const int32_t vk12x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[25] - vkernel_zero_point;
vacc0 += vi12x0 * vk12x0;
vacc1 += vi12x1 * vk12x1;
const int32_t vi13x0 = (int32_t) (uint32_t) i13[0];
const int32_t vi13x1 = (int32_t) (uint32_t) i13[1];
i13 += 2;
const int32_t vk13x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[26] - vkernel_zero_point;
const int32_t vk13x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[27] - vkernel_zero_point;
vacc0 += vi13x0 * vk13x0;
vacc1 += vi13x1 * vk13x1;
const int32_t vi14x0 = (int32_t) (uint32_t) i14[0];
const int32_t vi14x1 = (int32_t) (uint32_t) i14[1];
i14 += 2;
const int32_t vk14x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[28] - vkernel_zero_point;
const int32_t vk14x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[29] - vkernel_zero_point;
vacc0 += vi14x0 * vk14x0;
vacc1 += vi14x1 * vk14x1;
const int32_t vi15x0 = (int32_t) (uint32_t) i15[0];
const int32_t vi15x1 = (int32_t) (uint32_t) i15[1];
i15 += 2;
const int32_t vk15x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[30] - vkernel_zero_point;
const int32_t vk15x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[31] - vkernel_zero_point;
vacc0 += vi15x0 * vk15x0;
vacc1 += vi15x1 * vk15x1;
const int32_t vi16x0 = (int32_t) (uint32_t) i16[0];
const int32_t vi16x1 = (int32_t) (uint32_t) i16[1];
i16 += 2;
const int32_t vk16x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[32] - vkernel_zero_point;
const int32_t vk16x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[33] - vkernel_zero_point;
vacc0 += vi16x0 * vk16x0;
vacc1 += vi16x1 * vk16x1;
const int32_t vi17x0 = (int32_t) (uint32_t) i17[0];
const int32_t vi17x1 = (int32_t) (uint32_t) i17[1];
i17 += 2;
const int32_t vk17x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[34] - vkernel_zero_point;
const int32_t vk17x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[35] - vkernel_zero_point;
vacc0 += vi17x0 * vk17x0;
vacc1 += vi17x1 * vk17x1;
const int32_t vi18x0 = (int32_t) (uint32_t) i18[0];
const int32_t vi18x1 = (int32_t) (uint32_t) i18[1];
i18 += 2;
const int32_t vk18x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[36] - vkernel_zero_point;
const int32_t vk18x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[37] - vkernel_zero_point;
vacc0 += vi18x0 * vk18x0;
vacc1 += vi18x1 * vk18x1;
const int32_t vi19x0 = (int32_t) (uint32_t) i19[0];
const int32_t vi19x1 = (int32_t) (uint32_t) i19[1];
i19 += 2;
const int32_t vk19x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[38] - vkernel_zero_point;
const int32_t vk19x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[39] - vkernel_zero_point;
vacc0 += vi19x0 * vk19x0;
vacc1 += vi19x1 * vk19x1;
const int32_t vi20x0 = (int32_t) (uint32_t) i20[0];
const int32_t vi20x1 = (int32_t) (uint32_t) i20[1];
i20 += 2;
const int32_t vk20x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[40] - vkernel_zero_point;
const int32_t vk20x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[41] - vkernel_zero_point;
vacc0 += vi20x0 * vk20x0;
vacc1 += vi20x1 * vk20x1;
const int32_t vi21x0 = (int32_t) (uint32_t) i21[0];
const int32_t vi21x1 = (int32_t) (uint32_t) i21[1];
i21 += 2;
const int32_t vk21x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[42] - vkernel_zero_point;
const int32_t vk21x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[43] - vkernel_zero_point;
vacc0 += vi21x0 * vk21x0;
vacc1 += vi21x1 * vk21x1;
const int32_t vi22x0 = (int32_t) (uint32_t) i22[0];
const int32_t vi22x1 = (int32_t) (uint32_t) i22[1];
i22 += 2;
const int32_t vk22x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[44] - vkernel_zero_point;
const int32_t vk22x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[45] - vkernel_zero_point;
vacc0 += vi22x0 * vk22x0;
vacc1 += vi22x1 * vk22x1;
const int32_t vi23x0 = (int32_t) (uint32_t) i23[0];
const int32_t vi23x1 = (int32_t) (uint32_t) i23[1];
i23 += 2;
const int32_t vk23x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[46] - vkernel_zero_point;
const int32_t vk23x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[47] - vkernel_zero_point;
vacc0 += vi23x0 * vk23x0;
vacc1 += vi23x1 * vk23x1;
const int32_t vi24x0 = (int32_t) (uint32_t) i24[0];
const int32_t vi24x1 = (int32_t) (uint32_t) i24[1];
i24 += 2;
const int32_t vk24x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[48] - vkernel_zero_point;
const int32_t vk24x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[49] - vkernel_zero_point;
vacc0 += vi24x0 * vk24x0;
vacc1 += vi24x1 * vk24x1;
w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 50 * sizeof(uint8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
vfpacc0 *= vscale;
vfpacc1 *= vscale;
vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
output[0] = (uint8_t) vout0;
output[1] = (uint8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) (uint32_t) *i0;
const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0] - vkernel_zero_point;
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) (uint32_t) *i1;
const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2] - vkernel_zero_point;
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) (uint32_t) *i2;
const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4] - vkernel_zero_point;
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) (uint32_t) *i3;
const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6] - vkernel_zero_point;
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) (uint32_t) *i4;
const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8] - vkernel_zero_point;
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) (uint32_t) *i5;
const int32_t vk5 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10] - vkernel_zero_point;
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) (uint32_t) *i6;
const int32_t vk6 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12] - vkernel_zero_point;
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) (uint32_t) *i7;
const int32_t vk7 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14] - vkernel_zero_point;
vacc += vi7 * vk7;
const int32_t vi8 = (int32_t) (uint32_t) *i8;
const int32_t vk8 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[16] - vkernel_zero_point;
vacc += vi8 * vk8;
const int32_t vi9 = (int32_t) (uint32_t) *i9;
const int32_t vk9 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[18] - vkernel_zero_point;
vacc += vi9 * vk9;
const int32_t vi10 = (int32_t) (uint32_t) *i10;
const int32_t vk10 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[20] - vkernel_zero_point;
vacc += vi10 * vk10;
const int32_t vi11 = (int32_t) (uint32_t) *i11;
const int32_t vk11 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[22] - vkernel_zero_point;
vacc += vi11 * vk11;
const int32_t vi12 = (int32_t) (uint32_t) *i12;
const int32_t vk12 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[24] - vkernel_zero_point;
vacc += vi12 * vk12;
const int32_t vi13 = (int32_t) (uint32_t) *i13;
const int32_t vk13 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[26] - vkernel_zero_point;
vacc += vi13 * vk13;
const int32_t vi14 = (int32_t) (uint32_t) *i14;
const int32_t vk14 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[28] - vkernel_zero_point;
vacc += vi14 * vk14;
const int32_t vi15 = (int32_t) (uint32_t) *i15;
const int32_t vk15 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[30] - vkernel_zero_point;
vacc += vi15 * vk15;
const int32_t vi16 = (int32_t) (uint32_t) *i16;
const int32_t vk16 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[32] - vkernel_zero_point;
vacc += vi16 * vk16;
const int32_t vi17 = (int32_t) (uint32_t) *i17;
const int32_t vk17 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[34] - vkernel_zero_point;
vacc += vi17 * vk17;
const int32_t vi18 = (int32_t) (uint32_t) *i18;
const int32_t vk18 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[36] - vkernel_zero_point;
vacc += vi18 * vk18;
const int32_t vi19 = (int32_t) (uint32_t) *i19;
const int32_t vk19 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[38] - vkernel_zero_point;
vacc += vi19 * vk19;
const int32_t vi20 = (int32_t) (uint32_t) *i20;
const int32_t vk20 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[40] - vkernel_zero_point;
vacc += vi20 * vk20;
const int32_t vi21 = (int32_t) (uint32_t) *i21;
const int32_t vk21 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[42] - vkernel_zero_point;
vacc += vi21 * vk21;
const int32_t vi22 = (int32_t) (uint32_t) *i22;
const int32_t vk22 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[44] - vkernel_zero_point;
vacc += vi22 * vk22;
const int32_t vi23 = (int32_t) (uint32_t) *i23;
const int32_t vk23 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[46] - vkernel_zero_point;
vacc += vi23 * vk23;
const int32_t vi24 = (int32_t) (uint32_t) *i24;
const int32_t vk24 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[48] - vkernel_zero_point;
vacc += vi24 * vk24;
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (uint8_t) vout;
}
output = (uint8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
/*
 * ---- Boundary: next concatenated source file (extraction metadata preserved) ----
 * Repo: XNNPACK
 * File: XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-25p2c-minmax-fp32-scalar-imagic.c
 * Stats: 23,898 bytes; avg line length 43.339518; max line length 132; language: c
 */
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_dwconv_minmax_fp32_ukernel_25p2c__scalar_imagic(
size_t channels,
size_t output_width,
const uint8_t** input,
const void* weights,
uint8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float vscale = params->fp32_scalar_imagic.scale;
const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
const int32_t vkernel_zero_point = params->fp32_scalar_imagic.kernel_zero_point;
do {
const uint8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
const uint8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
}
const uint8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
}
const uint8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
}
const uint8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const uint8_t*) ((uintptr_t) i8 + input_offset);
}
const uint8_t* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const uint8_t*) ((uintptr_t) i9 + input_offset);
}
const uint8_t* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const uint8_t*) ((uintptr_t) i10 + input_offset);
}
const uint8_t* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const uint8_t*) ((uintptr_t) i11 + input_offset);
}
const uint8_t* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const uint8_t*) ((uintptr_t) i12 + input_offset);
}
const uint8_t* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const uint8_t*) ((uintptr_t) i13 + input_offset);
}
const uint8_t* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const uint8_t*) ((uintptr_t) i14 + input_offset);
}
const uint8_t* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const uint8_t*) ((uintptr_t) i15 + input_offset);
}
const uint8_t* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const uint8_t*) ((uintptr_t) i16 + input_offset);
}
const uint8_t* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const uint8_t*) ((uintptr_t) i17 + input_offset);
}
const uint8_t* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const uint8_t*) ((uintptr_t) i18 + input_offset);
}
const uint8_t* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const uint8_t*) ((uintptr_t) i19 + input_offset);
}
const uint8_t* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const uint8_t*) ((uintptr_t) i20 + input_offset);
}
const uint8_t* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const uint8_t*) ((uintptr_t) i21 + input_offset);
}
const uint8_t* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const uint8_t*) ((uintptr_t) i22 + input_offset);
}
const uint8_t* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const uint8_t*) ((uintptr_t) i23 + input_offset);
}
const uint8_t* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const uint8_t*) ((uintptr_t) i24 + input_offset);
}
input = (const uint8_t**) ((uintptr_t) input + input_stride);
size_t c = channels;
const void* w = weights;
for (; c >= 2; c -= 2) {
int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0] - vkernel_zero_point;
const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1] - vkernel_zero_point;
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2] - vkernel_zero_point;
const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3] - vkernel_zero_point;
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4] - vkernel_zero_point;
const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5] - vkernel_zero_point;
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6] - vkernel_zero_point;
const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7] - vkernel_zero_point;
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8] - vkernel_zero_point;
const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9] - vkernel_zero_point;
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) (uint32_t) i5[0];
const int32_t vi5x1 = (int32_t) (uint32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10] - vkernel_zero_point;
const int32_t vk5x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[11] - vkernel_zero_point;
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
const int32_t vi6x0 = (int32_t) (uint32_t) i6[0];
const int32_t vi6x1 = (int32_t) (uint32_t) i6[1];
i6 += 2;
const int32_t vk6x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12] - vkernel_zero_point;
const int32_t vk6x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[13] - vkernel_zero_point;
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
const int32_t vi7x0 = (int32_t) (uint32_t) i7[0];
const int32_t vi7x1 = (int32_t) (uint32_t) i7[1];
i7 += 2;
const int32_t vk7x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14] - vkernel_zero_point;
const int32_t vk7x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[15] - vkernel_zero_point;
vacc0 += vi7x0 * vk7x0;
vacc1 += vi7x1 * vk7x1;
const int32_t vi8x0 = (int32_t) (uint32_t) i8[0];
const int32_t vi8x1 = (int32_t) (uint32_t) i8[1];
i8 += 2;
const int32_t vk8x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[16] - vkernel_zero_point;
const int32_t vk8x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[17] - vkernel_zero_point;
vacc0 += vi8x0 * vk8x0;
vacc1 += vi8x1 * vk8x1;
const int32_t vi9x0 = (int32_t) (uint32_t) i9[0];
const int32_t vi9x1 = (int32_t) (uint32_t) i9[1];
i9 += 2;
const int32_t vk9x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[18] - vkernel_zero_point;
const int32_t vk9x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[19] - vkernel_zero_point;
vacc0 += vi9x0 * vk9x0;
vacc1 += vi9x1 * vk9x1;
const int32_t vi10x0 = (int32_t) (uint32_t) i10[0];
const int32_t vi10x1 = (int32_t) (uint32_t) i10[1];
i10 += 2;
const int32_t vk10x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[20] - vkernel_zero_point;
const int32_t vk10x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[21] - vkernel_zero_point;
vacc0 += vi10x0 * vk10x0;
vacc1 += vi10x1 * vk10x1;
const int32_t vi11x0 = (int32_t) (uint32_t) i11[0];
const int32_t vi11x1 = (int32_t) (uint32_t) i11[1];
i11 += 2;
const int32_t vk11x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[22] - vkernel_zero_point;
const int32_t vk11x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[23] - vkernel_zero_point;
vacc0 += vi11x0 * vk11x0;
vacc1 += vi11x1 * vk11x1;
const int32_t vi12x0 = (int32_t) (uint32_t) i12[0];
const int32_t vi12x1 = (int32_t) (uint32_t) i12[1];
i12 += 2;
const int32_t vk12x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[24] - vkernel_zero_point;
const int32_t vk12x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[25] - vkernel_zero_point;
vacc0 += vi12x0 * vk12x0;
vacc1 += vi12x1 * vk12x1;
const int32_t vi13x0 = (int32_t) (uint32_t) i13[0];
const int32_t vi13x1 = (int32_t) (uint32_t) i13[1];
i13 += 2;
const int32_t vk13x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[26] - vkernel_zero_point;
const int32_t vk13x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[27] - vkernel_zero_point;
vacc0 += vi13x0 * vk13x0;
vacc1 += vi13x1 * vk13x1;
const int32_t vi14x0 = (int32_t) (uint32_t) i14[0];
const int32_t vi14x1 = (int32_t) (uint32_t) i14[1];
i14 += 2;
const int32_t vk14x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[28] - vkernel_zero_point;
const int32_t vk14x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[29] - vkernel_zero_point;
vacc0 += vi14x0 * vk14x0;
vacc1 += vi14x1 * vk14x1;
const int32_t vi15x0 = (int32_t) (uint32_t) i15[0];
const int32_t vi15x1 = (int32_t) (uint32_t) i15[1];
i15 += 2;
const int32_t vk15x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[30] - vkernel_zero_point;
const int32_t vk15x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[31] - vkernel_zero_point;
vacc0 += vi15x0 * vk15x0;
vacc1 += vi15x1 * vk15x1;
const int32_t vi16x0 = (int32_t) (uint32_t) i16[0];
const int32_t vi16x1 = (int32_t) (uint32_t) i16[1];
i16 += 2;
const int32_t vk16x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[32] - vkernel_zero_point;
const int32_t vk16x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[33] - vkernel_zero_point;
vacc0 += vi16x0 * vk16x0;
vacc1 += vi16x1 * vk16x1;
const int32_t vi17x0 = (int32_t) (uint32_t) i17[0];
const int32_t vi17x1 = (int32_t) (uint32_t) i17[1];
i17 += 2;
const int32_t vk17x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[34] - vkernel_zero_point;
const int32_t vk17x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[35] - vkernel_zero_point;
vacc0 += vi17x0 * vk17x0;
vacc1 += vi17x1 * vk17x1;
const int32_t vi18x0 = (int32_t) (uint32_t) i18[0];
const int32_t vi18x1 = (int32_t) (uint32_t) i18[1];
i18 += 2;
const int32_t vk18x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[36] - vkernel_zero_point;
const int32_t vk18x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[37] - vkernel_zero_point;
vacc0 += vi18x0 * vk18x0;
vacc1 += vi18x1 * vk18x1;
const int32_t vi19x0 = (int32_t) (uint32_t) i19[0];
const int32_t vi19x1 = (int32_t) (uint32_t) i19[1];
i19 += 2;
const int32_t vk19x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[38] - vkernel_zero_point;
const int32_t vk19x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[39] - vkernel_zero_point;
vacc0 += vi19x0 * vk19x0;
vacc1 += vi19x1 * vk19x1;
const int32_t vi20x0 = (int32_t) (uint32_t) i20[0];
const int32_t vi20x1 = (int32_t) (uint32_t) i20[1];
i20 += 2;
const int32_t vk20x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[40] - vkernel_zero_point;
const int32_t vk20x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[41] - vkernel_zero_point;
vacc0 += vi20x0 * vk20x0;
vacc1 += vi20x1 * vk20x1;
const int32_t vi21x0 = (int32_t) (uint32_t) i21[0];
const int32_t vi21x1 = (int32_t) (uint32_t) i21[1];
i21 += 2;
const int32_t vk21x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[42] - vkernel_zero_point;
const int32_t vk21x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[43] - vkernel_zero_point;
vacc0 += vi21x0 * vk21x0;
vacc1 += vi21x1 * vk21x1;
const int32_t vi22x0 = (int32_t) (uint32_t) i22[0];
const int32_t vi22x1 = (int32_t) (uint32_t) i22[1];
i22 += 2;
const int32_t vk22x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[44] - vkernel_zero_point;
const int32_t vk22x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[45] - vkernel_zero_point;
vacc0 += vi22x0 * vk22x0;
vacc1 += vi22x1 * vk22x1;
const int32_t vi23x0 = (int32_t) (uint32_t) i23[0];
const int32_t vi23x1 = (int32_t) (uint32_t) i23[1];
i23 += 2;
const int32_t vk23x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[46] - vkernel_zero_point;
const int32_t vk23x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[47] - vkernel_zero_point;
vacc0 += vi23x0 * vk23x0;
vacc1 += vi23x1 * vk23x1;
const int32_t vi24x0 = (int32_t) (uint32_t) i24[0];
const int32_t vi24x1 = (int32_t) (uint32_t) i24[1];
i24 += 2;
const int32_t vk24x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[48] - vkernel_zero_point;
const int32_t vk24x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[49] - vkernel_zero_point;
vacc0 += vi24x0 * vk24x0;
vacc1 += vi24x1 * vk24x1;
w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 50 * sizeof(uint8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
vfpacc0 *= vscale;
vfpacc1 *= vscale;
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0);
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1);
vout0 = math_max_s32(vout0, vmagic_min);
vout1 = math_max_s32(vout1, vmagic_min);
vout0 = math_min_s32(vout0, vmagic_max);
vout1 = math_min_s32(vout1, vmagic_max);
vout0 -= vmagic_bias_less_zero_point;
vout1 -= vmagic_bias_less_zero_point;
output[0] = (uint8_t) vout0;
output[1] = (uint8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) (uint32_t) *i0;
const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0] - vkernel_zero_point;
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) (uint32_t) *i1;
const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2] - vkernel_zero_point;
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) (uint32_t) *i2;
const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4] - vkernel_zero_point;
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) (uint32_t) *i3;
const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6] - vkernel_zero_point;
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) (uint32_t) *i4;
const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8] - vkernel_zero_point;
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) (uint32_t) *i5;
const int32_t vk5 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10] - vkernel_zero_point;
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) (uint32_t) *i6;
const int32_t vk6 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12] - vkernel_zero_point;
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) (uint32_t) *i7;
const int32_t vk7 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14] - vkernel_zero_point;
vacc += vi7 * vk7;
const int32_t vi8 = (int32_t) (uint32_t) *i8;
const int32_t vk8 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[16] - vkernel_zero_point;
vacc += vi8 * vk8;
const int32_t vi9 = (int32_t) (uint32_t) *i9;
const int32_t vk9 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[18] - vkernel_zero_point;
vacc += vi9 * vk9;
const int32_t vi10 = (int32_t) (uint32_t) *i10;
const int32_t vk10 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[20] - vkernel_zero_point;
vacc += vi10 * vk10;
const int32_t vi11 = (int32_t) (uint32_t) *i11;
const int32_t vk11 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[22] - vkernel_zero_point;
vacc += vi11 * vk11;
const int32_t vi12 = (int32_t) (uint32_t) *i12;
const int32_t vk12 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[24] - vkernel_zero_point;
vacc += vi12 * vk12;
const int32_t vi13 = (int32_t) (uint32_t) *i13;
const int32_t vk13 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[26] - vkernel_zero_point;
vacc += vi13 * vk13;
const int32_t vi14 = (int32_t) (uint32_t) *i14;
const int32_t vk14 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[28] - vkernel_zero_point;
vacc += vi14 * vk14;
const int32_t vi15 = (int32_t) (uint32_t) *i15;
const int32_t vk15 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[30] - vkernel_zero_point;
vacc += vi15 * vk15;
const int32_t vi16 = (int32_t) (uint32_t) *i16;
const int32_t vk16 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[32] - vkernel_zero_point;
vacc += vi16 * vk16;
const int32_t vi17 = (int32_t) (uint32_t) *i17;
const int32_t vk17 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[34] - vkernel_zero_point;
vacc += vi17 * vk17;
const int32_t vi18 = (int32_t) (uint32_t) *i18;
const int32_t vk18 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[36] - vkernel_zero_point;
vacc += vi18 * vk18;
const int32_t vi19 = (int32_t) (uint32_t) *i19;
const int32_t vk19 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[38] - vkernel_zero_point;
vacc += vi19 * vk19;
const int32_t vi20 = (int32_t) (uint32_t) *i20;
const int32_t vk20 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[40] - vkernel_zero_point;
vacc += vi20 * vk20;
const int32_t vi21 = (int32_t) (uint32_t) *i21;
const int32_t vk21 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[42] - vkernel_zero_point;
vacc += vi21 * vk21;
const int32_t vi22 = (int32_t) (uint32_t) *i22;
const int32_t vk22 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[44] - vkernel_zero_point;
vacc += vi22 * vk22;
const int32_t vi23 = (int32_t) (uint32_t) *i23;
const int32_t vk23 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[46] - vkernel_zero_point;
vacc += vi23 * vk23;
const int32_t vi24 = (int32_t) (uint32_t) *i24;
const int32_t vk24 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[48] - vkernel_zero_point;
vacc += vi24 * vk24;
float vfpacc = (float) vacc * vscale;
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc);
vout = math_max_s32(vout, vmagic_min);
vout = math_min_s32(vout, vmagic_max);
vout -= vmagic_bias_less_zero_point;
*output++ = (uint8_t) vout;
}
output = (uint8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 23,715
| 42.675875
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-25p2c-minmax-fp32-scalar-lrintf.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_dwconv_minmax_fp32_ukernel_25p2c__scalar_lrintf(
size_t channels,
size_t output_width,
const uint8_t** input,
const void* weights,
uint8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float vscale = params->fp32_scalar_lrintf.scale;
const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
const int32_t vkernel_zero_point = params->fp32_scalar_lrintf.kernel_zero_point;
do {
const uint8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
const uint8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
}
const uint8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
}
const uint8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
}
const uint8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const uint8_t*) ((uintptr_t) i8 + input_offset);
}
const uint8_t* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const uint8_t*) ((uintptr_t) i9 + input_offset);
}
const uint8_t* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const uint8_t*) ((uintptr_t) i10 + input_offset);
}
const uint8_t* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const uint8_t*) ((uintptr_t) i11 + input_offset);
}
const uint8_t* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const uint8_t*) ((uintptr_t) i12 + input_offset);
}
const uint8_t* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const uint8_t*) ((uintptr_t) i13 + input_offset);
}
const uint8_t* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const uint8_t*) ((uintptr_t) i14 + input_offset);
}
const uint8_t* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const uint8_t*) ((uintptr_t) i15 + input_offset);
}
const uint8_t* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const uint8_t*) ((uintptr_t) i16 + input_offset);
}
const uint8_t* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const uint8_t*) ((uintptr_t) i17 + input_offset);
}
const uint8_t* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const uint8_t*) ((uintptr_t) i18 + input_offset);
}
const uint8_t* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const uint8_t*) ((uintptr_t) i19 + input_offset);
}
const uint8_t* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const uint8_t*) ((uintptr_t) i20 + input_offset);
}
const uint8_t* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const uint8_t*) ((uintptr_t) i21 + input_offset);
}
const uint8_t* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const uint8_t*) ((uintptr_t) i22 + input_offset);
}
const uint8_t* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const uint8_t*) ((uintptr_t) i23 + input_offset);
}
const uint8_t* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const uint8_t*) ((uintptr_t) i24 + input_offset);
}
input = (const uint8_t**) ((uintptr_t) input + input_stride);
size_t c = channels;
const void* w = weights;
for (; c >= 2; c -= 2) {
int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0] - vkernel_zero_point;
const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1] - vkernel_zero_point;
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2] - vkernel_zero_point;
const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3] - vkernel_zero_point;
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4] - vkernel_zero_point;
const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5] - vkernel_zero_point;
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6] - vkernel_zero_point;
const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7] - vkernel_zero_point;
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8] - vkernel_zero_point;
const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9] - vkernel_zero_point;
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) (uint32_t) i5[0];
const int32_t vi5x1 = (int32_t) (uint32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10] - vkernel_zero_point;
const int32_t vk5x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[11] - vkernel_zero_point;
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
const int32_t vi6x0 = (int32_t) (uint32_t) i6[0];
const int32_t vi6x1 = (int32_t) (uint32_t) i6[1];
i6 += 2;
const int32_t vk6x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12] - vkernel_zero_point;
const int32_t vk6x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[13] - vkernel_zero_point;
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
const int32_t vi7x0 = (int32_t) (uint32_t) i7[0];
const int32_t vi7x1 = (int32_t) (uint32_t) i7[1];
i7 += 2;
const int32_t vk7x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14] - vkernel_zero_point;
const int32_t vk7x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[15] - vkernel_zero_point;
vacc0 += vi7x0 * vk7x0;
vacc1 += vi7x1 * vk7x1;
const int32_t vi8x0 = (int32_t) (uint32_t) i8[0];
const int32_t vi8x1 = (int32_t) (uint32_t) i8[1];
i8 += 2;
const int32_t vk8x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[16] - vkernel_zero_point;
const int32_t vk8x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[17] - vkernel_zero_point;
vacc0 += vi8x0 * vk8x0;
vacc1 += vi8x1 * vk8x1;
const int32_t vi9x0 = (int32_t) (uint32_t) i9[0];
const int32_t vi9x1 = (int32_t) (uint32_t) i9[1];
i9 += 2;
const int32_t vk9x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[18] - vkernel_zero_point;
const int32_t vk9x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[19] - vkernel_zero_point;
vacc0 += vi9x0 * vk9x0;
vacc1 += vi9x1 * vk9x1;
const int32_t vi10x0 = (int32_t) (uint32_t) i10[0];
const int32_t vi10x1 = (int32_t) (uint32_t) i10[1];
i10 += 2;
const int32_t vk10x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[20] - vkernel_zero_point;
const int32_t vk10x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[21] - vkernel_zero_point;
vacc0 += vi10x0 * vk10x0;
vacc1 += vi10x1 * vk10x1;
const int32_t vi11x0 = (int32_t) (uint32_t) i11[0];
const int32_t vi11x1 = (int32_t) (uint32_t) i11[1];
i11 += 2;
const int32_t vk11x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[22] - vkernel_zero_point;
const int32_t vk11x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[23] - vkernel_zero_point;
vacc0 += vi11x0 * vk11x0;
vacc1 += vi11x1 * vk11x1;
const int32_t vi12x0 = (int32_t) (uint32_t) i12[0];
const int32_t vi12x1 = (int32_t) (uint32_t) i12[1];
i12 += 2;
const int32_t vk12x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[24] - vkernel_zero_point;
const int32_t vk12x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[25] - vkernel_zero_point;
vacc0 += vi12x0 * vk12x0;
vacc1 += vi12x1 * vk12x1;
const int32_t vi13x0 = (int32_t) (uint32_t) i13[0];
const int32_t vi13x1 = (int32_t) (uint32_t) i13[1];
i13 += 2;
const int32_t vk13x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[26] - vkernel_zero_point;
const int32_t vk13x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[27] - vkernel_zero_point;
vacc0 += vi13x0 * vk13x0;
vacc1 += vi13x1 * vk13x1;
const int32_t vi14x0 = (int32_t) (uint32_t) i14[0];
const int32_t vi14x1 = (int32_t) (uint32_t) i14[1];
i14 += 2;
const int32_t vk14x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[28] - vkernel_zero_point;
const int32_t vk14x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[29] - vkernel_zero_point;
vacc0 += vi14x0 * vk14x0;
vacc1 += vi14x1 * vk14x1;
const int32_t vi15x0 = (int32_t) (uint32_t) i15[0];
const int32_t vi15x1 = (int32_t) (uint32_t) i15[1];
i15 += 2;
const int32_t vk15x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[30] - vkernel_zero_point;
const int32_t vk15x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[31] - vkernel_zero_point;
vacc0 += vi15x0 * vk15x0;
vacc1 += vi15x1 * vk15x1;
const int32_t vi16x0 = (int32_t) (uint32_t) i16[0];
const int32_t vi16x1 = (int32_t) (uint32_t) i16[1];
i16 += 2;
const int32_t vk16x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[32] - vkernel_zero_point;
const int32_t vk16x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[33] - vkernel_zero_point;
vacc0 += vi16x0 * vk16x0;
vacc1 += vi16x1 * vk16x1;
const int32_t vi17x0 = (int32_t) (uint32_t) i17[0];
const int32_t vi17x1 = (int32_t) (uint32_t) i17[1];
i17 += 2;
const int32_t vk17x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[34] - vkernel_zero_point;
const int32_t vk17x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[35] - vkernel_zero_point;
vacc0 += vi17x0 * vk17x0;
vacc1 += vi17x1 * vk17x1;
const int32_t vi18x0 = (int32_t) (uint32_t) i18[0];
const int32_t vi18x1 = (int32_t) (uint32_t) i18[1];
i18 += 2;
const int32_t vk18x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[36] - vkernel_zero_point;
const int32_t vk18x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[37] - vkernel_zero_point;
vacc0 += vi18x0 * vk18x0;
vacc1 += vi18x1 * vk18x1;
const int32_t vi19x0 = (int32_t) (uint32_t) i19[0];
const int32_t vi19x1 = (int32_t) (uint32_t) i19[1];
i19 += 2;
const int32_t vk19x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[38] - vkernel_zero_point;
const int32_t vk19x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[39] - vkernel_zero_point;
vacc0 += vi19x0 * vk19x0;
vacc1 += vi19x1 * vk19x1;
const int32_t vi20x0 = (int32_t) (uint32_t) i20[0];
const int32_t vi20x1 = (int32_t) (uint32_t) i20[1];
i20 += 2;
const int32_t vk20x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[40] - vkernel_zero_point;
const int32_t vk20x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[41] - vkernel_zero_point;
vacc0 += vi20x0 * vk20x0;
vacc1 += vi20x1 * vk20x1;
const int32_t vi21x0 = (int32_t) (uint32_t) i21[0];
const int32_t vi21x1 = (int32_t) (uint32_t) i21[1];
i21 += 2;
const int32_t vk21x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[42] - vkernel_zero_point;
const int32_t vk21x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[43] - vkernel_zero_point;
vacc0 += vi21x0 * vk21x0;
vacc1 += vi21x1 * vk21x1;
const int32_t vi22x0 = (int32_t) (uint32_t) i22[0];
const int32_t vi22x1 = (int32_t) (uint32_t) i22[1];
i22 += 2;
const int32_t vk22x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[44] - vkernel_zero_point;
const int32_t vk22x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[45] - vkernel_zero_point;
vacc0 += vi22x0 * vk22x0;
vacc1 += vi22x1 * vk22x1;
const int32_t vi23x0 = (int32_t) (uint32_t) i23[0];
const int32_t vi23x1 = (int32_t) (uint32_t) i23[1];
i23 += 2;
const int32_t vk23x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[46] - vkernel_zero_point;
const int32_t vk23x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[47] - vkernel_zero_point;
vacc0 += vi23x0 * vk23x0;
vacc1 += vi23x1 * vk23x1;
const int32_t vi24x0 = (int32_t) (uint32_t) i24[0];
const int32_t vi24x1 = (int32_t) (uint32_t) i24[1];
i24 += 2;
const int32_t vk24x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[48] - vkernel_zero_point;
const int32_t vk24x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[49] - vkernel_zero_point;
vacc0 += vi24x0 * vk24x0;
vacc1 += vi24x1 * vk24x1;
w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 50 * sizeof(uint8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
vfpacc0 *= vscale;
vfpacc1 *= vscale;
vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
const int32_t vrndacc0 = (int32_t) lrintf(vfpacc0);
const int32_t vrndacc1 = (int32_t) lrintf(vfpacc1);
int32_t vout0 = (int32_t) vrndacc0 + voutput_zero_point;
int32_t vout1 = (int32_t) vrndacc1 + voutput_zero_point;
output[0] = (uint8_t) vout0;
output[1] = (uint8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) (uint32_t) *i0;
const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0] - vkernel_zero_point;
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) (uint32_t) *i1;
const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2] - vkernel_zero_point;
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) (uint32_t) *i2;
const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4] - vkernel_zero_point;
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) (uint32_t) *i3;
const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6] - vkernel_zero_point;
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) (uint32_t) *i4;
const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8] - vkernel_zero_point;
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) (uint32_t) *i5;
const int32_t vk5 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10] - vkernel_zero_point;
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) (uint32_t) *i6;
const int32_t vk6 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12] - vkernel_zero_point;
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) (uint32_t) *i7;
const int32_t vk7 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14] - vkernel_zero_point;
vacc += vi7 * vk7;
const int32_t vi8 = (int32_t) (uint32_t) *i8;
const int32_t vk8 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[16] - vkernel_zero_point;
vacc += vi8 * vk8;
const int32_t vi9 = (int32_t) (uint32_t) *i9;
const int32_t vk9 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[18] - vkernel_zero_point;
vacc += vi9 * vk9;
const int32_t vi10 = (int32_t) (uint32_t) *i10;
const int32_t vk10 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[20] - vkernel_zero_point;
vacc += vi10 * vk10;
const int32_t vi11 = (int32_t) (uint32_t) *i11;
const int32_t vk11 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[22] - vkernel_zero_point;
vacc += vi11 * vk11;
const int32_t vi12 = (int32_t) (uint32_t) *i12;
const int32_t vk12 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[24] - vkernel_zero_point;
vacc += vi12 * vk12;
const int32_t vi13 = (int32_t) (uint32_t) *i13;
const int32_t vk13 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[26] - vkernel_zero_point;
vacc += vi13 * vk13;
const int32_t vi14 = (int32_t) (uint32_t) *i14;
const int32_t vk14 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[28] - vkernel_zero_point;
vacc += vi14 * vk14;
const int32_t vi15 = (int32_t) (uint32_t) *i15;
const int32_t vk15 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[30] - vkernel_zero_point;
vacc += vi15 * vk15;
const int32_t vi16 = (int32_t) (uint32_t) *i16;
const int32_t vk16 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[32] - vkernel_zero_point;
vacc += vi16 * vk16;
const int32_t vi17 = (int32_t) (uint32_t) *i17;
const int32_t vk17 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[34] - vkernel_zero_point;
vacc += vi17 * vk17;
const int32_t vi18 = (int32_t) (uint32_t) *i18;
const int32_t vk18 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[36] - vkernel_zero_point;
vacc += vi18 * vk18;
const int32_t vi19 = (int32_t) (uint32_t) *i19;
const int32_t vk19 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[38] - vkernel_zero_point;
vacc += vi19 * vk19;
const int32_t vi20 = (int32_t) (uint32_t) *i20;
const int32_t vk20 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[40] - vkernel_zero_point;
vacc += vi20 * vk20;
const int32_t vi21 = (int32_t) (uint32_t) *i21;
const int32_t vk21 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[42] - vkernel_zero_point;
vacc += vi21 * vk21;
const int32_t vi22 = (int32_t) (uint32_t) *i22;
const int32_t vk22 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[44] - vkernel_zero_point;
vacc += vi22 * vk22;
const int32_t vi23 = (int32_t) (uint32_t) *i23;
const int32_t vk23 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[46] - vkernel_zero_point;
vacc += vi23 * vk23;
const int32_t vi24 = (int32_t) (uint32_t) *i24;
const int32_t vk24 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[48] - vkernel_zero_point;
vacc += vi24 * vk24;
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
const int32_t vrndacc = (int32_t) lrintf(vfpacc);
int32_t vout = vrndacc + voutput_zero_point;
*output++ = (uint8_t) vout;
}
output = (uint8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 23,794
| 43.146568
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-25p2c-minmax-fp32-wasm-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_dwconv_minmax_fp32_ukernel_25p2c__wasm_fmagic(
size_t channels,
size_t output_width,
const uint8_t** input,
const void* weights,
uint8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const float vscale = params->fp32_scalar_fmagic.scale;
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
const int32_t vkernel_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
do {
const uint8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
const uint8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
}
const uint8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
}
const uint8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
}
const uint8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const uint8_t*) ((uintptr_t) i8 + input_offset);
}
const uint8_t* i9 = input[9];
assert(i9 != NULL);
if XNN_UNPREDICTABLE(i9 != zero) {
i9 = (const uint8_t*) ((uintptr_t) i9 + input_offset);
}
const uint8_t* i10 = input[10];
assert(i10 != NULL);
if XNN_UNPREDICTABLE(i10 != zero) {
i10 = (const uint8_t*) ((uintptr_t) i10 + input_offset);
}
const uint8_t* i11 = input[11];
assert(i11 != NULL);
if XNN_UNPREDICTABLE(i11 != zero) {
i11 = (const uint8_t*) ((uintptr_t) i11 + input_offset);
}
const uint8_t* i12 = input[12];
assert(i12 != NULL);
if XNN_UNPREDICTABLE(i12 != zero) {
i12 = (const uint8_t*) ((uintptr_t) i12 + input_offset);
}
const uint8_t* i13 = input[13];
assert(i13 != NULL);
if XNN_UNPREDICTABLE(i13 != zero) {
i13 = (const uint8_t*) ((uintptr_t) i13 + input_offset);
}
const uint8_t* i14 = input[14];
assert(i14 != NULL);
if XNN_UNPREDICTABLE(i14 != zero) {
i14 = (const uint8_t*) ((uintptr_t) i14 + input_offset);
}
const uint8_t* i15 = input[15];
assert(i15 != NULL);
if XNN_UNPREDICTABLE(i15 != zero) {
i15 = (const uint8_t*) ((uintptr_t) i15 + input_offset);
}
const uint8_t* i16 = input[16];
assert(i16 != NULL);
if XNN_UNPREDICTABLE(i16 != zero) {
i16 = (const uint8_t*) ((uintptr_t) i16 + input_offset);
}
const uint8_t* i17 = input[17];
assert(i17 != NULL);
if XNN_UNPREDICTABLE(i17 != zero) {
i17 = (const uint8_t*) ((uintptr_t) i17 + input_offset);
}
const uint8_t* i18 = input[18];
assert(i18 != NULL);
if XNN_UNPREDICTABLE(i18 != zero) {
i18 = (const uint8_t*) ((uintptr_t) i18 + input_offset);
}
const uint8_t* i19 = input[19];
assert(i19 != NULL);
if XNN_UNPREDICTABLE(i19 != zero) {
i19 = (const uint8_t*) ((uintptr_t) i19 + input_offset);
}
const uint8_t* i20 = input[20];
assert(i20 != NULL);
if XNN_UNPREDICTABLE(i20 != zero) {
i20 = (const uint8_t*) ((uintptr_t) i20 + input_offset);
}
const uint8_t* i21 = input[21];
assert(i21 != NULL);
if XNN_UNPREDICTABLE(i21 != zero) {
i21 = (const uint8_t*) ((uintptr_t) i21 + input_offset);
}
const uint8_t* i22 = input[22];
assert(i22 != NULL);
if XNN_UNPREDICTABLE(i22 != zero) {
i22 = (const uint8_t*) ((uintptr_t) i22 + input_offset);
}
const uint8_t* i23 = input[23];
assert(i23 != NULL);
if XNN_UNPREDICTABLE(i23 != zero) {
i23 = (const uint8_t*) ((uintptr_t) i23 + input_offset);
}
const uint8_t* i24 = input[24];
assert(i24 != NULL);
if XNN_UNPREDICTABLE(i24 != zero) {
i24 = (const uint8_t*) ((uintptr_t) i24 + input_offset);
}
input = (const uint8_t**) ((uintptr_t) input + input_stride);
size_t c = channels;
const void* w = weights;
for (; c >= 2; c -= 2) {
int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0] - vkernel_zero_point;
const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1] - vkernel_zero_point;
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2] - vkernel_zero_point;
const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3] - vkernel_zero_point;
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4] - vkernel_zero_point;
const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5] - vkernel_zero_point;
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6] - vkernel_zero_point;
const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7] - vkernel_zero_point;
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8] - vkernel_zero_point;
const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9] - vkernel_zero_point;
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) (uint32_t) i5[0];
const int32_t vi5x1 = (int32_t) (uint32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10] - vkernel_zero_point;
const int32_t vk5x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[11] - vkernel_zero_point;
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
const int32_t vi6x0 = (int32_t) (uint32_t) i6[0];
const int32_t vi6x1 = (int32_t) (uint32_t) i6[1];
i6 += 2;
const int32_t vk6x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12] - vkernel_zero_point;
const int32_t vk6x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[13] - vkernel_zero_point;
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
const int32_t vi7x0 = (int32_t) (uint32_t) i7[0];
const int32_t vi7x1 = (int32_t) (uint32_t) i7[1];
i7 += 2;
const int32_t vk7x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14] - vkernel_zero_point;
const int32_t vk7x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[15] - vkernel_zero_point;
vacc0 += vi7x0 * vk7x0;
vacc1 += vi7x1 * vk7x1;
const int32_t vi8x0 = (int32_t) (uint32_t) i8[0];
const int32_t vi8x1 = (int32_t) (uint32_t) i8[1];
i8 += 2;
const int32_t vk8x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[16] - vkernel_zero_point;
const int32_t vk8x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[17] - vkernel_zero_point;
vacc0 += vi8x0 * vk8x0;
vacc1 += vi8x1 * vk8x1;
const int32_t vi9x0 = (int32_t) (uint32_t) i9[0];
const int32_t vi9x1 = (int32_t) (uint32_t) i9[1];
i9 += 2;
const int32_t vk9x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[18] - vkernel_zero_point;
const int32_t vk9x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[19] - vkernel_zero_point;
vacc0 += vi9x0 * vk9x0;
vacc1 += vi9x1 * vk9x1;
const int32_t vi10x0 = (int32_t) (uint32_t) i10[0];
const int32_t vi10x1 = (int32_t) (uint32_t) i10[1];
i10 += 2;
const int32_t vk10x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[20] - vkernel_zero_point;
const int32_t vk10x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[21] - vkernel_zero_point;
vacc0 += vi10x0 * vk10x0;
vacc1 += vi10x1 * vk10x1;
const int32_t vi11x0 = (int32_t) (uint32_t) i11[0];
const int32_t vi11x1 = (int32_t) (uint32_t) i11[1];
i11 += 2;
const int32_t vk11x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[22] - vkernel_zero_point;
const int32_t vk11x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[23] - vkernel_zero_point;
vacc0 += vi11x0 * vk11x0;
vacc1 += vi11x1 * vk11x1;
const int32_t vi12x0 = (int32_t) (uint32_t) i12[0];
const int32_t vi12x1 = (int32_t) (uint32_t) i12[1];
i12 += 2;
const int32_t vk12x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[24] - vkernel_zero_point;
const int32_t vk12x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[25] - vkernel_zero_point;
vacc0 += vi12x0 * vk12x0;
vacc1 += vi12x1 * vk12x1;
const int32_t vi13x0 = (int32_t) (uint32_t) i13[0];
const int32_t vi13x1 = (int32_t) (uint32_t) i13[1];
i13 += 2;
const int32_t vk13x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[26] - vkernel_zero_point;
const int32_t vk13x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[27] - vkernel_zero_point;
vacc0 += vi13x0 * vk13x0;
vacc1 += vi13x1 * vk13x1;
const int32_t vi14x0 = (int32_t) (uint32_t) i14[0];
const int32_t vi14x1 = (int32_t) (uint32_t) i14[1];
i14 += 2;
const int32_t vk14x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[28] - vkernel_zero_point;
const int32_t vk14x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[29] - vkernel_zero_point;
vacc0 += vi14x0 * vk14x0;
vacc1 += vi14x1 * vk14x1;
const int32_t vi15x0 = (int32_t) (uint32_t) i15[0];
const int32_t vi15x1 = (int32_t) (uint32_t) i15[1];
i15 += 2;
const int32_t vk15x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[30] - vkernel_zero_point;
const int32_t vk15x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[31] - vkernel_zero_point;
vacc0 += vi15x0 * vk15x0;
vacc1 += vi15x1 * vk15x1;
const int32_t vi16x0 = (int32_t) (uint32_t) i16[0];
const int32_t vi16x1 = (int32_t) (uint32_t) i16[1];
i16 += 2;
const int32_t vk16x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[32] - vkernel_zero_point;
const int32_t vk16x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[33] - vkernel_zero_point;
vacc0 += vi16x0 * vk16x0;
vacc1 += vi16x1 * vk16x1;
const int32_t vi17x0 = (int32_t) (uint32_t) i17[0];
const int32_t vi17x1 = (int32_t) (uint32_t) i17[1];
i17 += 2;
const int32_t vk17x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[34] - vkernel_zero_point;
const int32_t vk17x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[35] - vkernel_zero_point;
vacc0 += vi17x0 * vk17x0;
vacc1 += vi17x1 * vk17x1;
const int32_t vi18x0 = (int32_t) (uint32_t) i18[0];
const int32_t vi18x1 = (int32_t) (uint32_t) i18[1];
i18 += 2;
const int32_t vk18x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[36] - vkernel_zero_point;
const int32_t vk18x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[37] - vkernel_zero_point;
vacc0 += vi18x0 * vk18x0;
vacc1 += vi18x1 * vk18x1;
const int32_t vi19x0 = (int32_t) (uint32_t) i19[0];
const int32_t vi19x1 = (int32_t) (uint32_t) i19[1];
i19 += 2;
const int32_t vk19x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[38] - vkernel_zero_point;
const int32_t vk19x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[39] - vkernel_zero_point;
vacc0 += vi19x0 * vk19x0;
vacc1 += vi19x1 * vk19x1;
const int32_t vi20x0 = (int32_t) (uint32_t) i20[0];
const int32_t vi20x1 = (int32_t) (uint32_t) i20[1];
i20 += 2;
const int32_t vk20x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[40] - vkernel_zero_point;
const int32_t vk20x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[41] - vkernel_zero_point;
vacc0 += vi20x0 * vk20x0;
vacc1 += vi20x1 * vk20x1;
const int32_t vi21x0 = (int32_t) (uint32_t) i21[0];
const int32_t vi21x1 = (int32_t) (uint32_t) i21[1];
i21 += 2;
const int32_t vk21x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[42] - vkernel_zero_point;
const int32_t vk21x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[43] - vkernel_zero_point;
vacc0 += vi21x0 * vk21x0;
vacc1 += vi21x1 * vk21x1;
const int32_t vi22x0 = (int32_t) (uint32_t) i22[0];
const int32_t vi22x1 = (int32_t) (uint32_t) i22[1];
i22 += 2;
const int32_t vk22x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[44] - vkernel_zero_point;
const int32_t vk22x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[45] - vkernel_zero_point;
vacc0 += vi22x0 * vk22x0;
vacc1 += vi22x1 * vk22x1;
const int32_t vi23x0 = (int32_t) (uint32_t) i23[0];
const int32_t vi23x1 = (int32_t) (uint32_t) i23[1];
i23 += 2;
const int32_t vk23x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[46] - vkernel_zero_point;
const int32_t vk23x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[47] - vkernel_zero_point;
vacc0 += vi23x0 * vk23x0;
vacc1 += vi23x1 * vk23x1;
const int32_t vi24x0 = (int32_t) (uint32_t) i24[0];
const int32_t vi24x1 = (int32_t) (uint32_t) i24[1];
i24 += 2;
const int32_t vk24x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[48] - vkernel_zero_point;
const int32_t vk24x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[49] - vkernel_zero_point;
vacc0 += vi24x0 * vk24x0;
vacc1 += vi24x1 * vk24x1;
w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 50 * sizeof(uint8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
vfpacc0 *= vscale;
vfpacc1 *= vscale;
vfpacc0 = __builtin_wasm_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = __builtin_wasm_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc0 = __builtin_wasm_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = __builtin_wasm_min_f32(vfpacc1, voutput_max_less_zero_point);
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
output[0] = (uint8_t) vout0;
output[1] = (uint8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) (uint32_t) *i0;
const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0] - vkernel_zero_point;
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) (uint32_t) *i1;
const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2] - vkernel_zero_point;
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) (uint32_t) *i2;
const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4] - vkernel_zero_point;
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) (uint32_t) *i3;
const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6] - vkernel_zero_point;
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) (uint32_t) *i4;
const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8] - vkernel_zero_point;
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) (uint32_t) *i5;
const int32_t vk5 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10] - vkernel_zero_point;
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) (uint32_t) *i6;
const int32_t vk6 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12] - vkernel_zero_point;
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) (uint32_t) *i7;
const int32_t vk7 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14] - vkernel_zero_point;
vacc += vi7 * vk7;
const int32_t vi8 = (int32_t) (uint32_t) *i8;
const int32_t vk8 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[16] - vkernel_zero_point;
vacc += vi8 * vk8;
const int32_t vi9 = (int32_t) (uint32_t) *i9;
const int32_t vk9 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[18] - vkernel_zero_point;
vacc += vi9 * vk9;
const int32_t vi10 = (int32_t) (uint32_t) *i10;
const int32_t vk10 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[20] - vkernel_zero_point;
vacc += vi10 * vk10;
const int32_t vi11 = (int32_t) (uint32_t) *i11;
const int32_t vk11 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[22] - vkernel_zero_point;
vacc += vi11 * vk11;
const int32_t vi12 = (int32_t) (uint32_t) *i12;
const int32_t vk12 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[24] - vkernel_zero_point;
vacc += vi12 * vk12;
const int32_t vi13 = (int32_t) (uint32_t) *i13;
const int32_t vk13 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[26] - vkernel_zero_point;
vacc += vi13 * vk13;
const int32_t vi14 = (int32_t) (uint32_t) *i14;
const int32_t vk14 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[28] - vkernel_zero_point;
vacc += vi14 * vk14;
const int32_t vi15 = (int32_t) (uint32_t) *i15;
const int32_t vk15 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[30] - vkernel_zero_point;
vacc += vi15 * vk15;
const int32_t vi16 = (int32_t) (uint32_t) *i16;
const int32_t vk16 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[32] - vkernel_zero_point;
vacc += vi16 * vk16;
const int32_t vi17 = (int32_t) (uint32_t) *i17;
const int32_t vk17 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[34] - vkernel_zero_point;
vacc += vi17 * vk17;
const int32_t vi18 = (int32_t) (uint32_t) *i18;
const int32_t vk18 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[36] - vkernel_zero_point;
vacc += vi18 * vk18;
const int32_t vi19 = (int32_t) (uint32_t) *i19;
const int32_t vk19 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[38] - vkernel_zero_point;
vacc += vi19 * vk19;
const int32_t vi20 = (int32_t) (uint32_t) *i20;
const int32_t vk20 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[40] - vkernel_zero_point;
vacc += vi20 * vk20;
const int32_t vi21 = (int32_t) (uint32_t) *i21;
const int32_t vk21 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[42] - vkernel_zero_point;
vacc += vi21 * vk21;
const int32_t vi22 = (int32_t) (uint32_t) *i22;
const int32_t vk22 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[44] - vkernel_zero_point;
vacc += vi22 * vk22;
const int32_t vi23 = (int32_t) (uint32_t) *i23;
const int32_t vk23 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[46] - vkernel_zero_point;
vacc += vi23 * vk23;
const int32_t vi24 = (int32_t) (uint32_t) *i24;
const int32_t vk24 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[48] - vkernel_zero_point;
vacc += vi24 * vk24;
float vfpacc = (float) vacc * vscale;
vfpacc = __builtin_wasm_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = __builtin_wasm_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (uint8_t) vout;
}
output = (uint8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 23,956
| 43.447124
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-5f5m5l1c1s1r-minmax-fp32-scalar-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QU8 (unsigned 8-bit quantized) depthwise-convolution microkernel, scalar
// implementation with fp32 requantization using the "fmagic" (floating-point
// magic-bias) rounding trick.
//
// Multipass variant 5f5m5l1c1s1r:
//   - the first pass consumes 5 kernel taps (per channel: int32 bias + 5 uint8 weights),
//   - each middle pass consumes 5 further taps (5 uint8 weights per channel),
//   - the last pass consumes the final (up to 5) taps, requantizes, and writes output,
//   - 1 channel is processed per inner-loop iteration.
// Partial per-channel accumulators are staged in the caller-provided `buffer`:
// the first pass writes it, middle passes update it in place, the last pass reads it.
void xnn_qu8_dwconv_minmax_fp32_ukernel_5f5m5l1c1s1r__scalar_fmagic(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);
  assert(kernel_size > 5);  // this multipass kernel requires more than 5 taps
  // Requantization parameters (fp32 "fmagic" flavor of the params union).
  const float vscale = params->fp32_scalar_fmagic.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
  const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
  const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
  // Zero point subtracted from every (uint8) kernel weight before multiplication.
  const int32_t vkernel_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
  do {
    const void* w = weights;
    // First pass to process 5 inputs.
    {
      int32_t* b = buffer;
      // Rows equal to the shared `zero` (padding) row are not shifted by input_offset.
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      input += 5;  // advance to the rows consumed by the next pass
      size_t c = channels;
      do {
        // First-pass weights layout per channel: int32 bias, then 5 uint8 taps.
        int32_t vacc = unaligned_load_s32(w);
        const int32_t vi0 = (int32_t) (uint32_t) *i0++;
        const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[0] - vkernel_zero_point;
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) (uint32_t) *i1++;
        const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[1] - vkernel_zero_point;
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) (uint32_t) *i2++;
        const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[2] - vkernel_zero_point;
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) (uint32_t) *i3++;
        const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[3] - vkernel_zero_point;
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) (uint32_t) *i4++;
        const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[4] - vkernel_zero_point;
        vacc += vi4 * vk4;
        w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 5 * sizeof(uint8_t));
        *b++ = vacc;  // stage the partial accumulator for the next pass
      } while (--c != 0);
    }
    // Middle pass to process 5 inputs in each iteration.
    for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
      int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      input += 5;
      size_t c = channels;
      do {
        // Resume the staged accumulator; middle-pass weights are 5 uint8 taps
        // per channel (the bias was already consumed by the first pass).
        int32_t vacc = *b;
        const int32_t vi0 = (int32_t) (uint32_t) *i0++;
        const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) (uint32_t) *i1++;
        const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) (uint32_t) *i2++;
        const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) (uint32_t) *i3++;
        const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) (uint32_t) *i4++;
        const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
        vacc += vi4 * vk4;
        w = (const void*) ((uintptr_t) w + 5 * sizeof(uint8_t));
        *b++ = vacc;
      } while (--c != 0);
    }
    // Last pass to process up to 5 inputs.
    {
      const int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      // Note: `input` is not advanced here; the per-output-pixel stride is applied below.
      size_t c = channels;
      do {
        int32_t vacc = unaligned_load_s32(b++);
        const int32_t vi0 = (int32_t) (uint32_t) *i0++;
        const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) (uint32_t) *i1++;
        const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) (uint32_t) *i2++;
        const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) (uint32_t) *i3++;
        const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) (uint32_t) *i4++;
        const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
        vacc += vi4 * vk4;
        w = (const void*) ((uintptr_t) w + 5 * sizeof(uint8_t));
        // Requantize: scale to float, clamp to the output range (expressed
        // relative to the output zero point), then add the magic bias so the
        // rounded integer can be extracted from the float's bit pattern;
        // subtracting magic_bias_less_output_zero_point both removes the magic
        // bias bits and re-applies the output zero point.
        float vfpacc = (float) vacc * vscale;
        vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
        vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
        vfpacc += vmagic_bias;
        int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
        *output++ = (uint8_t) vout;
      } while (--c != 0);
    }
    input = (const uint8_t**) ((uintptr_t) input + input_stride);
    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 8,335
| 37.414747
| 126
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-5f5m5l1c1s1r-minmax-fp32-scalar-imagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QU8 (unsigned 8-bit quantized) depthwise-convolution microkernel, scalar
// implementation with fp32 requantization using the "imagic" (integer
// magic-bias) trick: the magic bias is added in the float domain, but the
// min/max clamp is performed on the reinterpreted integer bits.
//
// Multipass variant 5f5m5l1c1s1r:
//   - the first pass consumes 5 kernel taps (per channel: int32 bias + 5 uint8 weights),
//   - each middle pass consumes 5 further taps (5 uint8 weights per channel),
//   - the last pass consumes the final (up to 5) taps, requantizes, and writes output,
//   - 1 channel is processed per inner-loop iteration.
// Partial per-channel accumulators are staged in the caller-provided `buffer`.
void xnn_qu8_dwconv_minmax_fp32_ukernel_5f5m5l1c1s1r__scalar_imagic(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);
  assert(kernel_size > 5);  // this multipass kernel requires more than 5 taps
  // Requantization parameters (fp32 "imagic" flavor of the params union).
  const float vscale = params->fp32_scalar_imagic.scale;
  const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
  // Clamp bounds applied in the integer domain, after the magic-bias addition.
  const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
  const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
  const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
  // Zero point subtracted from every (uint8) kernel weight before multiplication.
  const int32_t vkernel_zero_point = params->fp32_scalar_imagic.kernel_zero_point;
  do {
    const void* w = weights;
    // First pass to process 5 inputs.
    {
      int32_t* b = buffer;
      // Rows equal to the shared `zero` (padding) row are not shifted by input_offset.
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      input += 5;  // advance to the rows consumed by the next pass
      size_t c = channels;
      do {
        // First-pass weights layout per channel: int32 bias, then 5 uint8 taps.
        int32_t vacc = unaligned_load_s32(w);
        const int32_t vi0 = (int32_t) (uint32_t) *i0++;
        const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[0] - vkernel_zero_point;
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) (uint32_t) *i1++;
        const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[1] - vkernel_zero_point;
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) (uint32_t) *i2++;
        const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[2] - vkernel_zero_point;
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) (uint32_t) *i3++;
        const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[3] - vkernel_zero_point;
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) (uint32_t) *i4++;
        const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[4] - vkernel_zero_point;
        vacc += vi4 * vk4;
        w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 5 * sizeof(uint8_t));
        *b++ = vacc;  // stage the partial accumulator for the next pass
      } while (--c != 0);
    }
    // Middle pass to process 5 inputs in each iteration.
    for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
      int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      input += 5;
      size_t c = channels;
      do {
        // Resume the staged accumulator; middle-pass weights are 5 uint8 taps
        // per channel (the bias was already consumed by the first pass).
        int32_t vacc = *b;
        const int32_t vi0 = (int32_t) (uint32_t) *i0++;
        const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) (uint32_t) *i1++;
        const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) (uint32_t) *i2++;
        const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) (uint32_t) *i3++;
        const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) (uint32_t) *i4++;
        const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
        vacc += vi4 * vk4;
        w = (const void*) ((uintptr_t) w + 5 * sizeof(uint8_t));
        *b++ = vacc;
      } while (--c != 0);
    }
    // Last pass to process up to 5 inputs.
    {
      const int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      // Note: `input` is not advanced here; the per-output-pixel stride is applied below.
      size_t c = channels;
      do {
        int32_t vacc = unaligned_load_s32(b++);
        const int32_t vi0 = (int32_t) (uint32_t) *i0++;
        const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) (uint32_t) *i1++;
        const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) (uint32_t) *i2++;
        const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) (uint32_t) *i3++;
        const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) (uint32_t) *i4++;
        const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
        vacc += vi4 * vk4;
        w = (const void*) ((uintptr_t) w + 5 * sizeof(uint8_t));
        // Requantize: scale to float and add the magic bias so the rounded
        // integer sits in the low bits of the float representation; then clamp
        // on the integer bit pattern (magic_min/magic_max) and subtract
        // magic_bias_less_zero_point to remove the bias bits and re-apply the
        // output zero point.
        float vfpacc = (float) vacc * vscale;
        vfpacc += vmagic_bias;
        int32_t vout = (int32_t) float_as_uint32(vfpacc);
        vout = math_max_s32(vout, vmagic_min);
        vout = math_min_s32(vout, vmagic_max);
        vout -= vmagic_bias_less_zero_point;
        *output++ = (uint8_t) vout;
      } while (--c != 0);
    }
    input = (const uint8_t**) ((uintptr_t) input + input_stride);
    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 8,223
| 36.724771
| 126
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-5f5m5l1c1s1r-minmax-fp32-scalar-lrintf.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QU8 depthwise convolution, multi-pass scalar microkernel with fp32
// requantization via lrintf.  Name decodes as "5f5m5l1c1s1r": the first pass
// consumes 5 kernel taps, each middle pass 5 taps, the last pass up to 5 taps;
// channels are processed 1 at a time.  Partial int32 sums between passes live
// in the caller-provided 'buffer'.
void xnn_qu8_dwconv_minmax_fp32_ukernel_5f5m5l1c1s1r__scalar_lrintf(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);
  assert(kernel_size > 5);  // multi-pass kernel: there is always a middle/last pass
  // Requantization parameters: clamp bounds are pre-biased by the output zero
  // point; vkernel_zero_point dequantizes the uint8 kernel taps.
  const float vscale = params->fp32_scalar_lrintf.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
  const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
  const int32_t vkernel_zero_point = params->fp32_scalar_lrintf.kernel_zero_point;
  do {
    const void* w = weights;
    // First pass to process 5 inputs.
    {
      int32_t* b = buffer;
      // A row pointer equal to 'zero' is the shared zero-padding row and must
      // not be offset; real rows are shifted by input_offset.
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      input += 5;
      size_t c = channels;
      do {
        // First-pass weights layout per channel: int32 bias followed by
        // 5 uint8 kernel taps.
        int32_t vacc = unaligned_load_s32(w);
        const int32_t vi0 = (int32_t) (uint32_t) *i0++;
        const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[0] - vkernel_zero_point;
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) (uint32_t) *i1++;
        const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[1] - vkernel_zero_point;
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) (uint32_t) *i2++;
        const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[2] - vkernel_zero_point;
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) (uint32_t) *i3++;
        const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[3] - vkernel_zero_point;
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) (uint32_t) *i4++;
        const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[4] - vkernel_zero_point;
        vacc += vi4 * vk4;
        // Advance past this channel's bias + 5 taps.
        w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 5 * sizeof(uint8_t));
        *b++ = vacc;  // stash partial sum for the next pass
      } while (--c != 0);
    }
    // Middle pass to process 5 inputs in each iteration.
    for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
      int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      input += 5;
      size_t c = channels;
      do {
        // Middle-pass weights hold only 5 uint8 taps per channel (no bias).
        int32_t vacc = *b;  // resume the partial sum from the previous pass
        const int32_t vi0 = (int32_t) (uint32_t) *i0++;
        const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) (uint32_t) *i1++;
        const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) (uint32_t) *i2++;
        const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) (uint32_t) *i3++;
        const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) (uint32_t) *i4++;
        const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
        vacc += vi4 * vk4;
        w = (const void*) ((uintptr_t) w + 5 * sizeof(uint8_t));
        *b++ = vacc;
      } while (--c != 0);
    }
    // Last pass to process up to 5 inputs.
    {
      const int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      size_t c = channels;
      do {
        int32_t vacc = unaligned_load_s32(b++);
        const int32_t vi0 = (int32_t) (uint32_t) *i0++;
        const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) (uint32_t) *i1++;
        const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) (uint32_t) *i2++;
        const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) (uint32_t) *i3++;
        const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) (uint32_t) *i4++;
        const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
        vacc += vi4 * vk4;
        w = (const void*) ((uintptr_t) w + 5 * sizeof(uint8_t));
        // Requantize: scale to float, clamp in float (bounds are pre-biased by
        // the output zero point), round via lrintf (rounds per the current FP
        // rounding mode, nearest-even by default), then re-add the zero point.
        float vfpacc = (float) vacc * vscale;
        vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
        vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
        const int32_t vrndacc = (int32_t) lrintf(vfpacc);
        int32_t vout = vrndacc + voutput_zero_point;
        *output++ = (uint8_t) vout;
      } while (--c != 0);
    }
    input = (const uint8_t**) ((uintptr_t) input + input_stride);
    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 8,239
| 36.97235
| 126
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-5f5m5l1c1s1r-minmax-fp32-wasm-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QU8 depthwise convolution, multi-pass scalar microkernel for WebAssembly
// with fp32 requantization via the "fmagic" (magic-bias) float-to-int trick.
// "5f5m5l1c1s1r": first pass consumes 5 kernel taps, each middle pass 5 taps,
// last pass up to 5 taps; channels processed 1 at a time.  Partial int32 sums
// between passes live in the caller-provided 'buffer'.
void xnn_qu8_dwconv_minmax_fp32_ukernel_5f5m5l1c1s1r__wasm_fmagic(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);
  assert(kernel_size > 5);  // multi-pass kernel: there is always a middle/last pass
  // Requantization parameters; the magic bias and its zero-point-adjusted
  // counterpart implement the float-bit-pattern rounding trick below.
  const float vscale = params->fp32_scalar_fmagic.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
  const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
  const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
  const int32_t vkernel_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
  do {
    const void* w = weights;
    // First pass to process 5 inputs.
    {
      int32_t* b = buffer;
      // A row pointer equal to 'zero' is the shared zero-padding row and must
      // not be offset; real rows are shifted by input_offset.
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      input += 5;
      size_t c = channels;
      do {
        // First-pass weights layout per channel: int32 bias followed by
        // 5 uint8 kernel taps.
        int32_t vacc = unaligned_load_s32(w);
        const int32_t vi0 = (int32_t) (uint32_t) *i0++;
        const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[0] - vkernel_zero_point;
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) (uint32_t) *i1++;
        const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[1] - vkernel_zero_point;
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) (uint32_t) *i2++;
        const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[2] - vkernel_zero_point;
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) (uint32_t) *i3++;
        const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[3] - vkernel_zero_point;
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) (uint32_t) *i4++;
        const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[4] - vkernel_zero_point;
        vacc += vi4 * vk4;
        // Advance past this channel's bias + 5 taps.
        w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 5 * sizeof(uint8_t));
        *b++ = vacc;  // stash partial sum for the next pass
      } while (--c != 0);
    }
    // Middle pass to process 5 inputs in each iteration.
    for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
      int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      input += 5;
      size_t c = channels;
      do {
        // Middle-pass weights hold only 5 uint8 taps per channel (no bias).
        int32_t vacc = *b;  // resume the partial sum from the previous pass
        const int32_t vi0 = (int32_t) (uint32_t) *i0++;
        const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) (uint32_t) *i1++;
        const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) (uint32_t) *i2++;
        const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) (uint32_t) *i3++;
        const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) (uint32_t) *i4++;
        const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
        vacc += vi4 * vk4;
        w = (const void*) ((uintptr_t) w + 5 * sizeof(uint8_t));
        *b++ = vacc;
      } while (--c != 0);
    }
    // Last pass to process up to 5 inputs.
    {
      const int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      size_t c = channels;
      do {
        int32_t vacc = unaligned_load_s32(b++);
        const int32_t vi0 = (int32_t) (uint32_t) *i0++;
        const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) (uint32_t) *i1++;
        const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) (uint32_t) *i2++;
        const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) (uint32_t) *i3++;
        const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) (uint32_t) *i4++;
        const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
        vacc += vi4 * vk4;
        w = (const void*) ((uintptr_t) w + 5 * sizeof(uint8_t));
        // Requantize: scale to float and clamp using the WAsm min/max builtins
        // (bounds are pre-biased by the output zero point).
        float vfpacc = (float) vacc * vscale;
        vfpacc = __builtin_wasm_max_f32(vfpacc, voutput_min_less_zero_point);
        vfpacc = __builtin_wasm_min_f32(vfpacc, voutput_max_less_zero_point);
        // Magic-bias rounding: adding vmagic_bias places the integer value in
        // the low mantissa bits; subtracting (magic_bias - output_zero_point)
        // from the reinterpreted bits removes the bias and adds the zero point.
        vfpacc += vmagic_bias;
        int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
        *output++ = (uint8_t) vout;
      } while (--c != 0);
    }
    input = (const uint8_t**) ((uintptr_t) input + input_stride);
    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 8,353
| 37.497696
| 126
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-5f5m5l2c1s1r-minmax-fp32-scalar-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QU8 depthwise convolution, multi-pass scalar microkernel with fp32
// requantization via the "fmagic" (magic-bias) float-to-int trick.
// "5f5m5l2c1s1r": first pass consumes 5 kernel taps, each middle pass 5 taps,
// last pass up to 5 taps; channels are processed 2 at a time with a 1-channel
// remainder.  Partial int32 sums between passes live in the caller-provided
// 'buffer'.
void xnn_qu8_dwconv_minmax_fp32_ukernel_5f5m5l2c1s1r__scalar_fmagic(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);
  assert(kernel_size > 5);  // multi-pass kernel: there is always a middle/last pass
  // Requantization parameters; the magic bias and its zero-point-adjusted
  // counterpart implement the float-bit-pattern rounding trick below.
  const float vscale = params->fp32_scalar_fmagic.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
  const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
  const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
  const int32_t vkernel_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
  do {
    const void* w = weights;
    // First pass to process 5 inputs.
    {
      int32_t* b = buffer;
      // A row pointer equal to 'zero' is the shared zero-padding row and must
      // not be offset; real rows are shifted by input_offset.
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      input += 5;
      size_t c = channels;
      // Main loop: two channels per iteration.  First-pass weights layout per
      // channel group: two int32 biases followed by interleaved uint8 taps
      // (tap-major, 2 channels per tap).
      for (; c >= 2; c -= 2) {
        int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
        int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
        const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
        const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
        i0 += 2;
        const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0] - vkernel_zero_point;
        const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1] - vkernel_zero_point;
        vacc0 += vi0x0 * vk0x0;
        vacc1 += vi0x1 * vk0x1;
        const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
        const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
        i1 += 2;
        const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2] - vkernel_zero_point;
        const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3] - vkernel_zero_point;
        vacc0 += vi1x0 * vk1x0;
        vacc1 += vi1x1 * vk1x1;
        const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
        const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
        i2 += 2;
        const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4] - vkernel_zero_point;
        const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5] - vkernel_zero_point;
        vacc0 += vi2x0 * vk2x0;
        vacc1 += vi2x1 * vk2x1;
        const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
        const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
        i3 += 2;
        const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6] - vkernel_zero_point;
        const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7] - vkernel_zero_point;
        vacc0 += vi3x0 * vk3x0;
        vacc1 += vi3x1 * vk3x1;
        const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
        const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
        i4 += 2;
        const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8] - vkernel_zero_point;
        const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9] - vkernel_zero_point;
        vacc0 += vi4x0 * vk4x0;
        vacc1 += vi4x1 * vk4x1;
        // Advance past the 2 biases + 10 taps of this channel pair.
        w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 10 * sizeof(uint8_t));
        b[0] = vacc0;
        b[1] = vacc1;
        b += 2;
      }
      // Remainder: at most one trailing channel (1 bias + 5 taps).
      if XNN_UNLIKELY(c != 0) {
        int32_t vacc = unaligned_load_s32(w);
        const int32_t vi0 = (int32_t) (uint32_t) *i0;
        const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0] - vkernel_zero_point;
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) (uint32_t) *i1;
        const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1] - vkernel_zero_point;
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) (uint32_t) *i2;
        const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2] - vkernel_zero_point;
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) (uint32_t) *i3;
        const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3] - vkernel_zero_point;
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) (uint32_t) *i4;
        const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4] - vkernel_zero_point;
        vacc += vi4 * vk4;
        w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 5 * sizeof(uint8_t));
        *b++ = vacc;
      }
    }
    // Middle pass to process 5 inputs in each iteration.
    for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
      int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      input += 5;
      size_t c = channels;
      // Middle-pass weights hold only taps (no bias): 10 uint8 per channel
      // pair.  Partial sums are resumed from and written back to 'buffer'.
      for (; c >= 2; c -= 2) {
        int32_t vacc0 = b[0];
        int32_t vacc1 = b[1];
        const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
        const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
        i0 += 2;
        const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
        const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
        vacc0 += vi0x0 * vk0x0;
        vacc1 += vi0x1 * vk0x1;
        const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
        const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
        i1 += 2;
        const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
        const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
        vacc0 += vi1x0 * vk1x0;
        vacc1 += vi1x1 * vk1x1;
        const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
        const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
        i2 += 2;
        const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
        const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
        vacc0 += vi2x0 * vk2x0;
        vacc1 += vi2x1 * vk2x1;
        const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
        const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
        i3 += 2;
        const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
        const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[7] - vkernel_zero_point;
        vacc0 += vi3x0 * vk3x0;
        vacc1 += vi3x1 * vk3x1;
        const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
        const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
        i4 += 2;
        const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[8] - vkernel_zero_point;
        const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[9] - vkernel_zero_point;
        vacc0 += vi4x0 * vk4x0;
        vacc1 += vi4x1 * vk4x1;
        w = (const void*) ((uintptr_t) w + 10 * sizeof(uint8_t));
        b[0] = vacc0;
        b[1] = vacc1;
        b += 2;
      }
      // Remainder channel: final read, so the input pointers need no
      // post-increment here.
      if XNN_UNLIKELY(c != 0) {
        int32_t vacc = b[0];
        const int32_t vi0 = (int32_t) (uint32_t) *i0;
        const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) (uint32_t) *i1;
        const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) (uint32_t) *i2;
        const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) (uint32_t) *i3;
        const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) (uint32_t) *i4;
        const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
        vacc += vi4 * vk4;
        w = (const void*) ((uintptr_t) w + 5 * sizeof(uint8_t));
        *b++ = vacc;
      }
    }
    // Last pass to process up to 5 inputs.
    {
      const int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      size_t c = channels;
      for (; c >= 2; c -= 2) {
        int32_t vacc0 = b[0];
        int32_t vacc1 = b[1];
        b += 2;
        const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
        const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
        i0 += 2;
        const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
        const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
        vacc0 += vi0x0 * vk0x0;
        vacc1 += vi0x1 * vk0x1;
        const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
        const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
        i1 += 2;
        const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
        const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
        vacc0 += vi1x0 * vk1x0;
        vacc1 += vi1x1 * vk1x1;
        const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
        const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
        i2 += 2;
        const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
        const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
        vacc0 += vi2x0 * vk2x0;
        vacc1 += vi2x1 * vk2x1;
        const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
        const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
        i3 += 2;
        const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
        const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[7] - vkernel_zero_point;
        vacc0 += vi3x0 * vk3x0;
        vacc1 += vi3x1 * vk3x1;
        const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
        const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
        i4 += 2;
        const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[8] - vkernel_zero_point;
        const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[9] - vkernel_zero_point;
        vacc0 += vi4x0 * vk4x0;
        vacc1 += vi4x1 * vk4x1;
        w = (const void*) ((uintptr_t) w + 10 * sizeof(uint8_t));
        // Requantize both channels: scale to float, clamp (bounds pre-biased
        // by the output zero point), then magic-bias round: adding vmagic_bias
        // places the integer value in the low mantissa bits, and subtracting
        // (magic_bias - output_zero_point) from the reinterpreted bits removes
        // the bias and adds the zero point in one step.
        float vfpacc0 = (float) vacc0;
        float vfpacc1 = (float) vacc1;
        vfpacc0 *= vscale;
        vfpacc1 *= vscale;
        vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
        vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
        vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
        vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
        vfpacc0 += vmagic_bias;
        vfpacc1 += vmagic_bias;
        int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
        int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
        output[0] = (uint8_t) vout0;
        output[1] = (uint8_t) vout1;
        output += 2;
      }
      // Remainder channel: final read, so no pointer post-increment needed.
      if XNN_UNLIKELY(c != 0) {
        int32_t vacc = b[0];
        const int32_t vi0 = (int32_t) (uint32_t) *i0;
        const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) (uint32_t) *i1;
        const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) (uint32_t) *i2;
        const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) (uint32_t) *i3;
        const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) (uint32_t) *i4;
        const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
        vacc += vi4 * vk4;
        float vfpacc = (float) vacc * vscale;
        vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
        vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
        vfpacc += vmagic_bias;
        int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
        *output++ = (uint8_t) vout;
      }
    }
    input = (const uint8_t**) ((uintptr_t) input + input_stride);
    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 16,011
| 37.676329
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-5f5m5l2c1s1r-minmax-fp32-scalar-imagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_dwconv_minmax_fp32_ukernel_5f5m5l2c1s1r__scalar_imagic(
size_t channels,
size_t output_width,
const uint8_t** input,
const void* weights,
uint8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const uint8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const float vscale = params->fp32_scalar_imagic.scale;
const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
const int32_t vkernel_zero_point = params->fp32_scalar_imagic.kernel_zero_point;
do {
const void* w = weights;
// First pass to process 5 inputs.
{
int32_t* b = buffer;
const uint8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0] - vkernel_zero_point;
const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1] - vkernel_zero_point;
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2] - vkernel_zero_point;
const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3] - vkernel_zero_point;
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4] - vkernel_zero_point;
const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5] - vkernel_zero_point;
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6] - vkernel_zero_point;
const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7] - vkernel_zero_point;
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8] - vkernel_zero_point;
const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9] - vkernel_zero_point;
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 10 * sizeof(uint8_t));
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) (uint32_t) *i0;
const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0] - vkernel_zero_point;
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) (uint32_t) *i1;
const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1] - vkernel_zero_point;
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) (uint32_t) *i2;
const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2] - vkernel_zero_point;
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) (uint32_t) *i3;
const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3] - vkernel_zero_point;
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) (uint32_t) *i4;
const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4] - vkernel_zero_point;
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 5 * sizeof(uint8_t));
*b++ = vacc;
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
int32_t* b = buffer;
const uint8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[7] - vkernel_zero_point;
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[8] - vkernel_zero_point;
const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[9] - vkernel_zero_point;
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
w = (const void*) ((uintptr_t) w + 10 * sizeof(uint8_t));
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = b[0];
const int32_t vi0 = (int32_t) (uint32_t) *i0;
const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) (uint32_t) *i1;
const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) (uint32_t) *i2;
const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) (uint32_t) *i3;
const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) (uint32_t) *i4;
const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 5 * sizeof(uint8_t));
*b++ = vacc;
}
}
// Last pass to process up to 5 inputs.
{
const int32_t* b = buffer;
const uint8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
b += 2;
const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[7] - vkernel_zero_point;
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[8] - vkernel_zero_point;
const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[9] - vkernel_zero_point;
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
w = (const void*) ((uintptr_t) w + 10 * sizeof(uint8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
vfpacc0 *= vscale;
vfpacc1 *= vscale;
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0);
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1);
vout0 = math_max_s32(vout0, vmagic_min);
vout1 = math_max_s32(vout1, vmagic_min);
vout0 = math_min_s32(vout0, vmagic_max);
vout1 = math_min_s32(vout1, vmagic_max);
vout0 -= vmagic_bias_less_zero_point;
vout1 -= vmagic_bias_less_zero_point;
output[0] = (uint8_t) vout0;
output[1] = (uint8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = b[0];
const int32_t vi0 = (int32_t) (uint32_t) *i0;
const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) (uint32_t) *i1;
const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) (uint32_t) *i2;
const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) (uint32_t) *i3;
const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) (uint32_t) *i4;
const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
vacc += vi4 * vk4;
float vfpacc = (float) vacc * vscale;
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc);
vout = math_max_s32(vout, vmagic_min);
vout = math_min_s32(vout, vmagic_max);
vout -= vmagic_bias_less_zero_point;
*output++ = (uint8_t) vout;
}
}
input = (const uint8_t**) ((uintptr_t) input + input_stride);
output = (uint8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 15,834
| 36.882775
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-5f5m5l2c1s1r-minmax-fp32-scalar-lrintf.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// Depthwise convolution (dwconv) microkernel for QU8 (asymmetric unsigned
// 8-bit quantized) data, scalar implementation, fp32 requantization with
// lrintf-based rounding.
//
// Multipass "5f5m5l2c1s1r" layout (as visible in the code below):
//   - First pass: consumes the first 5 kernel taps and seeds the int32
//     accumulator `buffer` from the per-channel int32 bias stored in `w`.
//   - Middle passes: each consumes 5 more taps, accumulating into `buffer`.
//   - Last pass: consumes the remaining (up to 5) taps, requantizes the
//     accumulators, clamps, and writes uint8 outputs.
//   - Channels are processed 2 at a time with a 1-channel remainder.
//
// Weight layout per 2-channel tile, established by the pointer bumps below:
// the first pass reads 2 int32 biases followed by 10 uint8 taps
// (5 taps x 2 channels, tap-major); middle/last passes read 10 uint8 taps.
// The 1-channel remainder reads 1 int32 bias + 5 uint8 taps (first pass) or
// 5 uint8 taps (later passes).
void xnn_qu8_dwconv_minmax_fp32_ukernel_5f5m5l2c1s1r__scalar_lrintf(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);
  assert(kernel_size > 5);  // must need at least one pass beyond the first
  // Requantization parameters: output = clamp(round(acc * scale)) + zero_point,
  // with the clamp applied in the float domain (bounds are pre-shifted by the
  // output zero point). Kernel weights are stored unsigned and re-centered by
  // subtracting vkernel_zero_point at load time.
  const float vscale = params->fp32_scalar_lrintf.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
  const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
  const int32_t vkernel_zero_point = params->fp32_scalar_lrintf.kernel_zero_point;
  do {
    const void* w = weights;
    // First pass to process 5 inputs.
    {
      int32_t* b = buffer;
      // Row pointers equal to `zero` reference the shared zero buffer (used
      // for padding) and must NOT be shifted by input_offset.
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      input += 5;
      size_t c = channels;
      for (; c >= 2; c -= 2) {
        // Seed the accumulators with the (possibly unaligned) int32 biases.
        int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
        int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
        const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
        const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
        i0 += 2;
        // Taps follow the 2 biases in the weight blob; re-center by the
        // kernel zero point before the multiply-accumulate.
        const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0] - vkernel_zero_point;
        const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1] - vkernel_zero_point;
        vacc0 += vi0x0 * vk0x0;
        vacc1 += vi0x1 * vk0x1;
        const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
        const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
        i1 += 2;
        const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2] - vkernel_zero_point;
        const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3] - vkernel_zero_point;
        vacc0 += vi1x0 * vk1x0;
        vacc1 += vi1x1 * vk1x1;
        const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
        const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
        i2 += 2;
        const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4] - vkernel_zero_point;
        const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5] - vkernel_zero_point;
        vacc0 += vi2x0 * vk2x0;
        vacc1 += vi2x1 * vk2x1;
        const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
        const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
        i3 += 2;
        const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6] - vkernel_zero_point;
        const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7] - vkernel_zero_point;
        vacc0 += vi3x0 * vk3x0;
        vacc1 += vi3x1 * vk3x1;
        const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
        const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
        i4 += 2;
        const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8] - vkernel_zero_point;
        const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9] - vkernel_zero_point;
        vacc0 += vi4x0 * vk4x0;
        vacc1 += vi4x1 * vk4x1;
        // Advance past this tile: 2 biases + 10 taps.
        w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 10 * sizeof(uint8_t));
        // Spill partial sums to the scratch buffer for the next pass.
        b[0] = vacc0;
        b[1] = vacc1;
        b += 2;
      }
      // Remainder: last odd channel (1 bias + 5 taps).
      if XNN_UNLIKELY(c != 0) {
        int32_t vacc = unaligned_load_s32(w);
        const int32_t vi0 = (int32_t) (uint32_t) *i0;
        const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0] - vkernel_zero_point;
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) (uint32_t) *i1;
        const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1] - vkernel_zero_point;
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) (uint32_t) *i2;
        const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2] - vkernel_zero_point;
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) (uint32_t) *i3;
        const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3] - vkernel_zero_point;
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) (uint32_t) *i4;
        const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4] - vkernel_zero_point;
        vacc += vi4 * vk4;
        w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 5 * sizeof(uint8_t));
        *b++ = vacc;
      }
    }
    // Middle pass to process 5 inputs in each iteration.
    // Runs while more than 5 taps remain, so the last pass always has
    // between 1 and 5 taps to finish.
    for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
      int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      input += 5;
      size_t c = channels;
      for (; c >= 2; c -= 2) {
        // Resume from the partial sums written by the previous pass;
        // no biases here, `w` now points straight at the taps.
        int32_t vacc0 = b[0];
        int32_t vacc1 = b[1];
        const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
        const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
        i0 += 2;
        const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
        const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
        vacc0 += vi0x0 * vk0x0;
        vacc1 += vi0x1 * vk0x1;
        const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
        const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
        i1 += 2;
        const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
        const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
        vacc0 += vi1x0 * vk1x0;
        vacc1 += vi1x1 * vk1x1;
        const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
        const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
        i2 += 2;
        const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
        const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
        vacc0 += vi2x0 * vk2x0;
        vacc1 += vi2x1 * vk2x1;
        const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
        const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
        i3 += 2;
        const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
        const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[7] - vkernel_zero_point;
        vacc0 += vi3x0 * vk3x0;
        vacc1 += vi3x1 * vk3x1;
        const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
        const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
        i4 += 2;
        const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[8] - vkernel_zero_point;
        const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[9] - vkernel_zero_point;
        vacc0 += vi4x0 * vk4x0;
        vacc1 += vi4x1 * vk4x1;
        w = (const void*) ((uintptr_t) w + 10 * sizeof(uint8_t));
        b[0] = vacc0;
        b[1] = vacc1;
        b += 2;
      }
      if XNN_UNLIKELY(c != 0) {
        int32_t vacc = b[0];
        const int32_t vi0 = (int32_t) (uint32_t) *i0;
        const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) (uint32_t) *i1;
        const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) (uint32_t) *i2;
        const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) (uint32_t) *i3;
        const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) (uint32_t) *i4;
        const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
        vacc += vi4 * vk4;
        w = (const void*) ((uintptr_t) w + 5 * sizeof(uint8_t));
        *b++ = vacc;
      }
    }
    // Last pass to process up to 5 inputs.
    // NOTE(review): this pass always reads 5 row pointers and 5 taps per
    // channel; taps beyond kernel_size are presumably padded (rows via the
    // `zero` buffer, weights by the packing code) — confirm against the packer.
    {
      const int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      size_t c = channels;
      for (; c >= 2; c -= 2) {
        int32_t vacc0 = b[0];
        int32_t vacc1 = b[1];
        b += 2;
        const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
        const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
        i0 += 2;
        const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
        const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
        vacc0 += vi0x0 * vk0x0;
        vacc1 += vi0x1 * vk0x1;
        const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
        const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
        i1 += 2;
        const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
        const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
        vacc0 += vi1x0 * vk1x0;
        vacc1 += vi1x1 * vk1x1;
        const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
        const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
        i2 += 2;
        const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
        const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
        vacc0 += vi2x0 * vk2x0;
        vacc1 += vi2x1 * vk2x1;
        const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
        const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
        i3 += 2;
        const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
        const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[7] - vkernel_zero_point;
        vacc0 += vi3x0 * vk3x0;
        vacc1 += vi3x1 * vk3x1;
        const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
        const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
        i4 += 2;
        const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[8] - vkernel_zero_point;
        const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[9] - vkernel_zero_point;
        vacc0 += vi4x0 * vk4x0;
        vacc1 += vi4x1 * vk4x1;
        w = (const void*) ((uintptr_t) w + 10 * sizeof(uint8_t));
        // Requantize: scale in fp32, clamp in the zero-point-shifted float
        // domain (so the clamped value is safely in int32 range), round to
        // nearest with lrintf, then re-add the output zero point.
        float vfpacc0 = (float) vacc0;
        float vfpacc1 = (float) vacc1;
        vfpacc0 *= vscale;
        vfpacc1 *= vscale;
        vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
        vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
        vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
        vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
        const int32_t vrndacc0 = (int32_t) lrintf(vfpacc0);
        const int32_t vrndacc1 = (int32_t) lrintf(vfpacc1);
        int32_t vout0 = (int32_t) vrndacc0 + voutput_zero_point;
        int32_t vout1 = (int32_t) vrndacc1 + voutput_zero_point;
        output[0] = (uint8_t) vout0;
        output[1] = (uint8_t) vout1;
        output += 2;
      }
      if XNN_UNLIKELY(c != 0) {
        int32_t vacc = b[0];
        const int32_t vi0 = (int32_t) (uint32_t) *i0;
        const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) (uint32_t) *i1;
        const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) (uint32_t) *i2;
        const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) (uint32_t) *i3;
        const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) (uint32_t) *i4;
        const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
        vacc += vi4 * vk4;
        // Same scale -> clamp -> lrintf -> add-zero-point sequence as above.
        float vfpacc = (float) vacc * vscale;
        vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
        vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
        const int32_t vrndacc = (int32_t) lrintf(vfpacc);
        int32_t vout = vrndacc + voutput_zero_point;
        *output++ = (uint8_t) vout;
      }
    }
    // Advance to the next output pixel: `input` by the caller-provided byte
    // stride over the pointer array, `output` by the caller-provided byte
    // increment.
    input = (const uint8_t**) ((uintptr_t) input + input_stride);
    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 15,907
| 37.425121
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-5f5m5l2c1s1r-minmax-fp32-wasm-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_dwconv_minmax_fp32_ukernel_5f5m5l2c1s1r__wasm_fmagic(
size_t channels,
size_t output_width,
const uint8_t** input,
const void* weights,
uint8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const uint8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const float vscale = params->fp32_scalar_fmagic.scale;
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
const int32_t vkernel_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
do {
const void* w = weights;
// First pass to process 5 inputs.
{
int32_t* b = buffer;
const uint8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0] - vkernel_zero_point;
const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1] - vkernel_zero_point;
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2] - vkernel_zero_point;
const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3] - vkernel_zero_point;
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4] - vkernel_zero_point;
const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5] - vkernel_zero_point;
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6] - vkernel_zero_point;
const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7] - vkernel_zero_point;
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8] - vkernel_zero_point;
const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9] - vkernel_zero_point;
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 10 * sizeof(uint8_t));
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) (uint32_t) *i0;
const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0] - vkernel_zero_point;
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) (uint32_t) *i1;
const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1] - vkernel_zero_point;
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) (uint32_t) *i2;
const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2] - vkernel_zero_point;
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) (uint32_t) *i3;
const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3] - vkernel_zero_point;
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) (uint32_t) *i4;
const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4] - vkernel_zero_point;
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 5 * sizeof(uint8_t));
*b++ = vacc;
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
int32_t* b = buffer;
const uint8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[7] - vkernel_zero_point;
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[8] - vkernel_zero_point;
const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[9] - vkernel_zero_point;
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
w = (const void*) ((uintptr_t) w + 10 * sizeof(uint8_t));
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = b[0];
const int32_t vi0 = (int32_t) (uint32_t) *i0;
const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) (uint32_t) *i1;
const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) (uint32_t) *i2;
const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) (uint32_t) *i3;
const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) (uint32_t) *i4;
const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 5 * sizeof(uint8_t));
*b++ = vacc;
}
}
// Last pass to process up to 5 inputs.
{
const int32_t* b = buffer;
const uint8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
b += 2;
const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[7] - vkernel_zero_point;
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[8] - vkernel_zero_point;
const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[9] - vkernel_zero_point;
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
w = (const void*) ((uintptr_t) w + 10 * sizeof(uint8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
vfpacc0 *= vscale;
vfpacc1 *= vscale;
vfpacc0 = __builtin_wasm_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = __builtin_wasm_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc0 = __builtin_wasm_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = __builtin_wasm_min_f32(vfpacc1, voutput_max_less_zero_point);
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
output[0] = (uint8_t) vout0;
output[1] = (uint8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = b[0];
const int32_t vi0 = (int32_t) (uint32_t) *i0;
const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) (uint32_t) *i1;
const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) (uint32_t) *i2;
const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) (uint32_t) *i3;
const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) (uint32_t) *i4;
const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
vacc += vi4 * vk4;
float vfpacc = (float) vacc * vscale;
vfpacc = __builtin_wasm_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = __builtin_wasm_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (uint8_t) vout;
}
}
input = (const uint8_t**) ((uintptr_t) input + input_stride);
output = (uint8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 16,069
| 37.816425
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-5f5m5l4c1s1r-minmax-fp32-scalar-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QU8 (unsigned 8-bit quantized) depthwise-convolution microkernel, multipass
// scalar variant: the kernel taps are split into a first pass of 5 taps, zero
// or more middle passes of 5 taps each, and a last pass of 5 taps (5f5m5l).
// Channels are processed 4 at a time with a 1-channel scalar remainder loop
// (4c1s1r). Partial int32 accumulators are carried between passes in `buffer`.
//
// Requantization uses FP32 scaling with the "fmagic" (float magic-bias) trick:
// scale in float, clamp against (output_min/max - zero_point), add a magic
// bias so the integer result lands in the float mantissa, reinterpret the
// float bits as int32, and subtract a precomputed constant to recover the
// quantized output including the output zero point.
//
// Arguments:
//   channels         - number of channels (> 0)
//   output_width     - number of output pixels to produce (> 0)
//   input            - array of kernel_size row pointers per output pixel
//   weights          - packed weights: per 4-channel group, 4 int32 biases
//                      followed by 20 uint8 taps (first pass layout); middle
//                      and last passes hold taps only
//   output           - output pixel stream
//   input_stride     - byte offset applied to the `input` pointer array after
//                      each output pixel
//   output_increment - byte offset applied to `output` after each pixel
//   input_offset     - byte offset added to every non-`zero` input row pointer
//   zero             - pointer to the zero (padding) buffer; rows equal to it
//                      must NOT be offset
//   kernel_size      - total number of taps (must exceed the 5-tap first pass)
//   buffer           - int32 scratch holding `channels` partial accumulators
//   params           - precomputed requantization parameters
void xnn_qu8_dwconv_minmax_fp32_ukernel_5f5m5l4c1s1r__scalar_fmagic(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);
  assert(kernel_size > 5);  // multipass kernel: more taps than the first pass

  // Host-precomputed requantization parameters.
  const float vscale = params->fp32_scalar_fmagic.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
  const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
  const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
  const int32_t vkernel_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
  do {
    const void* w = weights;
    // First pass to process 5 inputs.
    // Initializes buffer[c] = bias[c] + sum over the first 5 taps.
    {
      int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      input += 5;
      size_t c = channels;
      for (; c >= 4; c -= 4) {
        // Per-group weights layout: 4 int32 biases, then 5 taps x 4 channels.
        int32_t vacc0 = ((const int32_t*) w)[0];
        int32_t vacc1 = ((const int32_t*) w)[1];
        int32_t vacc2 = ((const int32_t*) w)[2];
        int32_t vacc3 = ((const int32_t*) w)[3];
        const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
        const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
        const int32_t vi0x2 = (int32_t) (uint32_t) i0[2];
        const int32_t vi0x3 = (int32_t) (uint32_t) i0[3];
        i0 += 4;
        const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[0] - vkernel_zero_point;
        const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[1] - vkernel_zero_point;
        const int32_t vk0x2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[2] - vkernel_zero_point;
        const int32_t vk0x3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[3] - vkernel_zero_point;
        vacc0 += vi0x0 * vk0x0;
        vacc1 += vi0x1 * vk0x1;
        vacc2 += vi0x2 * vk0x2;
        vacc3 += vi0x3 * vk0x3;
        const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
        const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
        const int32_t vi1x2 = (int32_t) (uint32_t) i1[2];
        const int32_t vi1x3 = (int32_t) (uint32_t) i1[3];
        i1 += 4;
        const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[4] - vkernel_zero_point;
        const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[5] - vkernel_zero_point;
        const int32_t vk1x2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[6] - vkernel_zero_point;
        const int32_t vk1x3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[7] - vkernel_zero_point;
        vacc0 += vi1x0 * vk1x0;
        vacc1 += vi1x1 * vk1x1;
        vacc2 += vi1x2 * vk1x2;
        vacc3 += vi1x3 * vk1x3;
        const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
        const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
        const int32_t vi2x2 = (int32_t) (uint32_t) i2[2];
        const int32_t vi2x3 = (int32_t) (uint32_t) i2[3];
        i2 += 4;
        const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[8] - vkernel_zero_point;
        const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[9] - vkernel_zero_point;
        const int32_t vk2x2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[10] - vkernel_zero_point;
        const int32_t vk2x3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[11] - vkernel_zero_point;
        vacc0 += vi2x0 * vk2x0;
        vacc1 += vi2x1 * vk2x1;
        vacc2 += vi2x2 * vk2x2;
        vacc3 += vi2x3 * vk2x3;
        const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
        const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
        const int32_t vi3x2 = (int32_t) (uint32_t) i3[2];
        const int32_t vi3x3 = (int32_t) (uint32_t) i3[3];
        i3 += 4;
        const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[12] - vkernel_zero_point;
        const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[13] - vkernel_zero_point;
        const int32_t vk3x2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[14] - vkernel_zero_point;
        const int32_t vk3x3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[15] - vkernel_zero_point;
        vacc0 += vi3x0 * vk3x0;
        vacc1 += vi3x1 * vk3x1;
        vacc2 += vi3x2 * vk3x2;
        vacc3 += vi3x3 * vk3x3;
        const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
        const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
        const int32_t vi4x2 = (int32_t) (uint32_t) i4[2];
        const int32_t vi4x3 = (int32_t) (uint32_t) i4[3];
        i4 += 4;
        const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[16] - vkernel_zero_point;
        const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[17] - vkernel_zero_point;
        const int32_t vk4x2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[18] - vkernel_zero_point;
        const int32_t vk4x3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[19] - vkernel_zero_point;
        vacc0 += vi4x0 * vk4x0;
        vacc1 += vi4x1 * vk4x1;
        vacc2 += vi4x2 * vk4x2;
        vacc3 += vi4x3 * vk4x3;
        w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t) + 20 * sizeof(uint8_t));
        b[0] = vacc0;
        b[1] = vacc1;
        b[2] = vacc2;
        b[3] = vacc3;
        b += 4;
      }
      if XNN_UNLIKELY(c != 0) {
        do {
          // Each remainder iteration advances w by 9 bytes (1 int32 bias +
          // 5 uint8 taps), so from the second iteration on w is no longer
          // 4-byte aligned: load the bias with unaligned_load_s32 instead of
          // dereferencing a cast int32_t pointer (which would be UB).
          int32_t vacc = unaligned_load_s32(w);
          const int32_t vi0 = (int32_t) (uint32_t) *i0++;
          const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0] - vkernel_zero_point;
          vacc += vi0 * vk0;
          const int32_t vi1 = (int32_t) (uint32_t) *i1++;
          const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1] - vkernel_zero_point;
          vacc += vi1 * vk1;
          const int32_t vi2 = (int32_t) (uint32_t) *i2++;
          const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2] - vkernel_zero_point;
          vacc += vi2 * vk2;
          const int32_t vi3 = (int32_t) (uint32_t) *i3++;
          const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3] - vkernel_zero_point;
          vacc += vi3 * vk3;
          const int32_t vi4 = (int32_t) (uint32_t) *i4++;
          const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4] - vkernel_zero_point;
          vacc += vi4 * vk4;
          w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 5 * sizeof(uint8_t));
          *b++ = vacc;
        } while (--c != 0);
      }
    }
    // Middle pass to process 5 inputs in each iteration.
    // Accumulates 5 more taps into buffer[c]; no bias, taps-only weights.
    for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
      int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      input += 5;
      size_t c = channels;
      for (; c >= 4; c -= 4) {
        int32_t vacc0 = b[0];
        int32_t vacc1 = b[1];
        int32_t vacc2 = b[2];
        int32_t vacc3 = b[3];
        const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
        const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
        const int32_t vi0x2 = (int32_t) (uint32_t) i0[2];
        const int32_t vi0x3 = (int32_t) (uint32_t) i0[3];
        i0 += 4;
        const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
        const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
        const int32_t vk0x2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
        const int32_t vk0x3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
        vacc0 += vi0x0 * vk0x0;
        vacc1 += vi0x1 * vk0x1;
        vacc2 += vi0x2 * vk0x2;
        vacc3 += vi0x3 * vk0x3;
        const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
        const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
        const int32_t vi1x2 = (int32_t) (uint32_t) i1[2];
        const int32_t vi1x3 = (int32_t) (uint32_t) i1[3];
        i1 += 4;
        const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
        const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
        const int32_t vk1x2 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
        const int32_t vk1x3 = (int32_t) (uint32_t) ((const uint8_t*) w)[7] - vkernel_zero_point;
        vacc0 += vi1x0 * vk1x0;
        vacc1 += vi1x1 * vk1x1;
        vacc2 += vi1x2 * vk1x2;
        vacc3 += vi1x3 * vk1x3;
        const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
        const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
        const int32_t vi2x2 = (int32_t) (uint32_t) i2[2];
        const int32_t vi2x3 = (int32_t) (uint32_t) i2[3];
        i2 += 4;
        const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[8] - vkernel_zero_point;
        const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[9] - vkernel_zero_point;
        const int32_t vk2x2 = (int32_t) (uint32_t) ((const uint8_t*) w)[10] - vkernel_zero_point;
        const int32_t vk2x3 = (int32_t) (uint32_t) ((const uint8_t*) w)[11] - vkernel_zero_point;
        vacc0 += vi2x0 * vk2x0;
        vacc1 += vi2x1 * vk2x1;
        vacc2 += vi2x2 * vk2x2;
        vacc3 += vi2x3 * vk2x3;
        const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
        const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
        const int32_t vi3x2 = (int32_t) (uint32_t) i3[2];
        const int32_t vi3x3 = (int32_t) (uint32_t) i3[3];
        i3 += 4;
        const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[12] - vkernel_zero_point;
        const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[13] - vkernel_zero_point;
        const int32_t vk3x2 = (int32_t) (uint32_t) ((const uint8_t*) w)[14] - vkernel_zero_point;
        const int32_t vk3x3 = (int32_t) (uint32_t) ((const uint8_t*) w)[15] - vkernel_zero_point;
        vacc0 += vi3x0 * vk3x0;
        vacc1 += vi3x1 * vk3x1;
        vacc2 += vi3x2 * vk3x2;
        vacc3 += vi3x3 * vk3x3;
        const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
        const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
        const int32_t vi4x2 = (int32_t) (uint32_t) i4[2];
        const int32_t vi4x3 = (int32_t) (uint32_t) i4[3];
        i4 += 4;
        const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[16] - vkernel_zero_point;
        const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[17] - vkernel_zero_point;
        const int32_t vk4x2 = (int32_t) (uint32_t) ((const uint8_t*) w)[18] - vkernel_zero_point;
        const int32_t vk4x3 = (int32_t) (uint32_t) ((const uint8_t*) w)[19] - vkernel_zero_point;
        vacc0 += vi4x0 * vk4x0;
        vacc1 += vi4x1 * vk4x1;
        vacc2 += vi4x2 * vk4x2;
        vacc3 += vi4x3 * vk4x3;
        w = (const void*) ((uintptr_t) w + 20 * sizeof(uint8_t));
        b[0] = vacc0;
        b[1] = vacc1;
        b[2] = vacc2;
        b[3] = vacc3;
        b += 4;
      }
      if XNN_UNLIKELY(c != 0) {
        do {
          int32_t vacc = *b;
          const int32_t vi0 = (int32_t) (uint32_t) *i0++;
          const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
          vacc += vi0 * vk0;
          const int32_t vi1 = (int32_t) (uint32_t) *i1++;
          const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
          vacc += vi1 * vk1;
          const int32_t vi2 = (int32_t) (uint32_t) *i2++;
          const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
          vacc += vi2 * vk2;
          const int32_t vi3 = (int32_t) (uint32_t) *i3++;
          const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
          vacc += vi3 * vk3;
          const int32_t vi4 = (int32_t) (uint32_t) *i4++;
          const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
          vacc += vi4 * vk4;
          w = (const void*) ((uintptr_t) w + 5 * sizeof(uint8_t));
          *b++ = vacc;
        } while (--c != 0);
      }
    }
    // Last pass to process up to 5 inputs.
    // Adds the final 5 taps, then requantizes and stores the output bytes.
    {
      const int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      size_t c = channels;
      for (; c >= 4; c -= 4) {
        int32_t vacc0 = b[0];
        int32_t vacc1 = b[1];
        int32_t vacc2 = b[2];
        int32_t vacc3 = b[3];
        b += 4;
        const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
        const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
        const int32_t vi0x2 = (int32_t) (uint32_t) i0[2];
        const int32_t vi0x3 = (int32_t) (uint32_t) i0[3];
        i0 += 4;
        const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
        const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
        const int32_t vk0x2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
        const int32_t vk0x3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
        vacc0 += vi0x0 * vk0x0;
        vacc1 += vi0x1 * vk0x1;
        vacc2 += vi0x2 * vk0x2;
        vacc3 += vi0x3 * vk0x3;
        const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
        const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
        const int32_t vi1x2 = (int32_t) (uint32_t) i1[2];
        const int32_t vi1x3 = (int32_t) (uint32_t) i1[3];
        i1 += 4;
        const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
        const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
        const int32_t vk1x2 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
        const int32_t vk1x3 = (int32_t) (uint32_t) ((const uint8_t*) w)[7] - vkernel_zero_point;
        vacc0 += vi1x0 * vk1x0;
        vacc1 += vi1x1 * vk1x1;
        vacc2 += vi1x2 * vk1x2;
        vacc3 += vi1x3 * vk1x3;
        const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
        const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
        const int32_t vi2x2 = (int32_t) (uint32_t) i2[2];
        const int32_t vi2x3 = (int32_t) (uint32_t) i2[3];
        i2 += 4;
        const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[8] - vkernel_zero_point;
        const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[9] - vkernel_zero_point;
        const int32_t vk2x2 = (int32_t) (uint32_t) ((const uint8_t*) w)[10] - vkernel_zero_point;
        const int32_t vk2x3 = (int32_t) (uint32_t) ((const uint8_t*) w)[11] - vkernel_zero_point;
        vacc0 += vi2x0 * vk2x0;
        vacc1 += vi2x1 * vk2x1;
        vacc2 += vi2x2 * vk2x2;
        vacc3 += vi2x3 * vk2x3;
        const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
        const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
        const int32_t vi3x2 = (int32_t) (uint32_t) i3[2];
        const int32_t vi3x3 = (int32_t) (uint32_t) i3[3];
        i3 += 4;
        const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[12] - vkernel_zero_point;
        const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[13] - vkernel_zero_point;
        const int32_t vk3x2 = (int32_t) (uint32_t) ((const uint8_t*) w)[14] - vkernel_zero_point;
        const int32_t vk3x3 = (int32_t) (uint32_t) ((const uint8_t*) w)[15] - vkernel_zero_point;
        vacc0 += vi3x0 * vk3x0;
        vacc1 += vi3x1 * vk3x1;
        vacc2 += vi3x2 * vk3x2;
        vacc3 += vi3x3 * vk3x3;
        const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
        const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
        const int32_t vi4x2 = (int32_t) (uint32_t) i4[2];
        const int32_t vi4x3 = (int32_t) (uint32_t) i4[3];
        i4 += 4;
        const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[16] - vkernel_zero_point;
        const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[17] - vkernel_zero_point;
        const int32_t vk4x2 = (int32_t) (uint32_t) ((const uint8_t*) w)[18] - vkernel_zero_point;
        const int32_t vk4x3 = (int32_t) (uint32_t) ((const uint8_t*) w)[19] - vkernel_zero_point;
        vacc0 += vi4x0 * vk4x0;
        vacc1 += vi4x1 * vk4x1;
        vacc2 += vi4x2 * vk4x2;
        vacc3 += vi4x3 * vk4x3;
        w = (const void*) ((uintptr_t) w + 20 * sizeof(uint8_t));
        // Requantize: int32 accumulator -> float, scale, clamp, magic-bias.
        float vfpacc0 = (float) vacc0;
        float vfpacc1 = (float) vacc1;
        float vfpacc2 = (float) vacc2;
        float vfpacc3 = (float) vacc3;
        vfpacc0 *= vscale;
        vfpacc1 *= vscale;
        vfpacc2 *= vscale;
        vfpacc3 *= vscale;
        vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
        vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
        vfpacc2 = math_max_f32(vfpacc2, voutput_min_less_zero_point);
        vfpacc3 = math_max_f32(vfpacc3, voutput_min_less_zero_point);
        vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
        vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
        vfpacc2 = math_min_f32(vfpacc2, voutput_max_less_zero_point);
        vfpacc3 = math_min_f32(vfpacc3, voutput_max_less_zero_point);
        // Adding the magic bias pins the value in the mantissa; the bit
        // pattern minus the precomputed constant is the quantized output
        // (output zero point included).
        vfpacc0 += vmagic_bias;
        vfpacc1 += vmagic_bias;
        vfpacc2 += vmagic_bias;
        vfpacc3 += vmagic_bias;
        int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
        int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
        int32_t vout2 = (int32_t) float_as_uint32(vfpacc2) - vmagic_bias_less_output_zero_point;
        int32_t vout3 = (int32_t) float_as_uint32(vfpacc3) - vmagic_bias_less_output_zero_point;
        output[0] = (uint8_t) vout0;
        output[1] = (uint8_t) vout1;
        output[2] = (uint8_t) vout2;
        output[3] = (uint8_t) vout3;
        output += 4;
      }
      if XNN_UNLIKELY(c != 0) {
        do {
          int32_t vacc = *b++;
          const int32_t vi0 = (int32_t) (uint32_t) *i0++;
          const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
          vacc += vi0 * vk0;
          const int32_t vi1 = (int32_t) (uint32_t) *i1++;
          const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
          vacc += vi1 * vk1;
          const int32_t vi2 = (int32_t) (uint32_t) *i2++;
          const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
          vacc += vi2 * vk2;
          const int32_t vi3 = (int32_t) (uint32_t) *i3++;
          const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
          vacc += vi3 * vk3;
          const int32_t vi4 = (int32_t) (uint32_t) *i4++;
          const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
          vacc += vi4 * vk4;
          w = (const void*) ((uintptr_t) w + 5 * sizeof(uint8_t));
          // Same fmagic requantization as the 4-channel path, one lane.
          float vfpacc = (float) vacc * vscale;
          vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
          vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
          vfpacc += vmagic_bias;
          int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
          *output++ = (uint8_t) vout;
        } while (--c != 0);
      }
    }
    // Advance to the next output pixel: input is an array of row pointers
    // strided in bytes, output by a byte increment.
    input = (const uint8_t**) ((uintptr_t) input + input_stride);
    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 23,376
| 42.777154
| 133
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-5f5m5l4c1s1r-minmax-fp32-scalar-imagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_dwconv_minmax_fp32_ukernel_5f5m5l4c1s1r__scalar_imagic(
size_t channels,
size_t output_width,
const uint8_t** input,
const void* weights,
uint8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const uint8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const float vscale = params->fp32_scalar_imagic.scale;
const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
const int32_t vkernel_zero_point = params->fp32_scalar_imagic.kernel_zero_point;
do {
const void* w = weights;
// First pass to process 5 inputs.
{
int32_t* b = buffer;
const uint8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
for (; c >= 4; c -= 4) {
int32_t vacc0 = ((const int32_t*) w)[0];
int32_t vacc1 = ((const int32_t*) w)[1];
int32_t vacc2 = ((const int32_t*) w)[2];
int32_t vacc3 = ((const int32_t*) w)[3];
const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
const int32_t vi0x2 = (int32_t) (uint32_t) i0[2];
const int32_t vi0x3 = (int32_t) (uint32_t) i0[3];
i0 += 4;
const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[0] - vkernel_zero_point;
const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[1] - vkernel_zero_point;
const int32_t vk0x2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[2] - vkernel_zero_point;
const int32_t vk0x3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[3] - vkernel_zero_point;
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
const int32_t vi1x2 = (int32_t) (uint32_t) i1[2];
const int32_t vi1x3 = (int32_t) (uint32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[4] - vkernel_zero_point;
const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[5] - vkernel_zero_point;
const int32_t vk1x2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[6] - vkernel_zero_point;
const int32_t vk1x3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[7] - vkernel_zero_point;
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
const int32_t vi2x2 = (int32_t) (uint32_t) i2[2];
const int32_t vi2x3 = (int32_t) (uint32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[8] - vkernel_zero_point;
const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[9] - vkernel_zero_point;
const int32_t vk2x2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[10] - vkernel_zero_point;
const int32_t vk2x3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[11] - vkernel_zero_point;
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
const int32_t vi3x2 = (int32_t) (uint32_t) i3[2];
const int32_t vi3x3 = (int32_t) (uint32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[12] - vkernel_zero_point;
const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[13] - vkernel_zero_point;
const int32_t vk3x2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[14] - vkernel_zero_point;
const int32_t vk3x3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[15] - vkernel_zero_point;
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
const int32_t vi4x2 = (int32_t) (uint32_t) i4[2];
const int32_t vi4x3 = (int32_t) (uint32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[16] - vkernel_zero_point;
const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[17] - vkernel_zero_point;
const int32_t vk4x2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[18] - vkernel_zero_point;
const int32_t vk4x3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[19] - vkernel_zero_point;
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t) + 20 * sizeof(uint8_t));
b[0] = vacc0;
b[1] = vacc1;
b[2] = vacc2;
b[3] = vacc3;
b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32_t vacc = *((const int32_t*) w);
const int32_t vi0 = (int32_t) (uint32_t) *i0++;
const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0] - vkernel_zero_point;
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) (uint32_t) *i1++;
const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1] - vkernel_zero_point;
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) (uint32_t) *i2++;
const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2] - vkernel_zero_point;
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) (uint32_t) *i3++;
const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3] - vkernel_zero_point;
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) (uint32_t) *i4++;
const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4] - vkernel_zero_point;
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 5 * sizeof(uint8_t));
*b++ = vacc;
} while (--c != 0);
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
int32_t* b = buffer;
const uint8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
for (; c >= 4; c -= 4) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
int32_t vacc2 = b[2];
int32_t vacc3 = b[3];
const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
const int32_t vi0x2 = (int32_t) (uint32_t) i0[2];
const int32_t vi0x3 = (int32_t) (uint32_t) i0[3];
i0 += 4;
const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
const int32_t vk0x2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
const int32_t vk0x3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
const int32_t vi1x2 = (int32_t) (uint32_t) i1[2];
const int32_t vi1x3 = (int32_t) (uint32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
const int32_t vk1x2 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
const int32_t vk1x3 = (int32_t) (uint32_t) ((const uint8_t*) w)[7] - vkernel_zero_point;
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
const int32_t vi2x2 = (int32_t) (uint32_t) i2[2];
const int32_t vi2x3 = (int32_t) (uint32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[8] - vkernel_zero_point;
const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[9] - vkernel_zero_point;
const int32_t vk2x2 = (int32_t) (uint32_t) ((const uint8_t*) w)[10] - vkernel_zero_point;
const int32_t vk2x3 = (int32_t) (uint32_t) ((const uint8_t*) w)[11] - vkernel_zero_point;
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
const int32_t vi3x2 = (int32_t) (uint32_t) i3[2];
const int32_t vi3x3 = (int32_t) (uint32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[12] - vkernel_zero_point;
const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[13] - vkernel_zero_point;
const int32_t vk3x2 = (int32_t) (uint32_t) ((const uint8_t*) w)[14] - vkernel_zero_point;
const int32_t vk3x3 = (int32_t) (uint32_t) ((const uint8_t*) w)[15] - vkernel_zero_point;
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
const int32_t vi4x2 = (int32_t) (uint32_t) i4[2];
const int32_t vi4x3 = (int32_t) (uint32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[16] - vkernel_zero_point;
const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[17] - vkernel_zero_point;
const int32_t vk4x2 = (int32_t) (uint32_t) ((const uint8_t*) w)[18] - vkernel_zero_point;
const int32_t vk4x3 = (int32_t) (uint32_t) ((const uint8_t*) w)[19] - vkernel_zero_point;
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
w = (const void*) ((uintptr_t) w + 20 * sizeof(uint8_t));
b[0] = vacc0;
b[1] = vacc1;
b[2] = vacc2;
b[3] = vacc3;
b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32_t vacc = *b;
const int32_t vi0 = (int32_t) (uint32_t) *i0++;
const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) (uint32_t) *i1++;
const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) (uint32_t) *i2++;
const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) (uint32_t) *i3++;
const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) (uint32_t) *i4++;
const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 5 * sizeof(uint8_t));
*b++ = vacc;
} while (--c != 0);
}
}
// Last pass to process up to 5 inputs.
{
const int32_t* b = buffer;
const uint8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
int32_t vacc2 = b[2];
int32_t vacc3 = b[3];
b += 4;
const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
const int32_t vi0x2 = (int32_t) (uint32_t) i0[2];
const int32_t vi0x3 = (int32_t) (uint32_t) i0[3];
i0 += 4;
const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
const int32_t vk0x2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
const int32_t vk0x3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
const int32_t vi1x2 = (int32_t) (uint32_t) i1[2];
const int32_t vi1x3 = (int32_t) (uint32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
const int32_t vk1x2 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
const int32_t vk1x3 = (int32_t) (uint32_t) ((const uint8_t*) w)[7] - vkernel_zero_point;
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
const int32_t vi2x2 = (int32_t) (uint32_t) i2[2];
const int32_t vi2x3 = (int32_t) (uint32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[8] - vkernel_zero_point;
const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[9] - vkernel_zero_point;
const int32_t vk2x2 = (int32_t) (uint32_t) ((const uint8_t*) w)[10] - vkernel_zero_point;
const int32_t vk2x3 = (int32_t) (uint32_t) ((const uint8_t*) w)[11] - vkernel_zero_point;
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
const int32_t vi3x2 = (int32_t) (uint32_t) i3[2];
const int32_t vi3x3 = (int32_t) (uint32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[12] - vkernel_zero_point;
const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[13] - vkernel_zero_point;
const int32_t vk3x2 = (int32_t) (uint32_t) ((const uint8_t*) w)[14] - vkernel_zero_point;
const int32_t vk3x3 = (int32_t) (uint32_t) ((const uint8_t*) w)[15] - vkernel_zero_point;
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
const int32_t vi4x2 = (int32_t) (uint32_t) i4[2];
const int32_t vi4x3 = (int32_t) (uint32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[16] - vkernel_zero_point;
const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[17] - vkernel_zero_point;
const int32_t vk4x2 = (int32_t) (uint32_t) ((const uint8_t*) w)[18] - vkernel_zero_point;
const int32_t vk4x3 = (int32_t) (uint32_t) ((const uint8_t*) w)[19] - vkernel_zero_point;
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
w = (const void*) ((uintptr_t) w + 20 * sizeof(uint8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
float vfpacc2 = (float) vacc2;
float vfpacc3 = (float) vacc3;
vfpacc0 *= vscale;
vfpacc1 *= vscale;
vfpacc2 *= vscale;
vfpacc3 *= vscale;
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
vfpacc2 += vmagic_bias;
vfpacc3 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0);
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1);
int32_t vout2 = (int32_t) float_as_uint32(vfpacc2);
int32_t vout3 = (int32_t) float_as_uint32(vfpacc3);
vout0 = math_max_s32(vout0, vmagic_min);
vout1 = math_max_s32(vout1, vmagic_min);
vout2 = math_max_s32(vout2, vmagic_min);
vout3 = math_max_s32(vout3, vmagic_min);
vout0 = math_min_s32(vout0, vmagic_max);
vout1 = math_min_s32(vout1, vmagic_max);
vout2 = math_min_s32(vout2, vmagic_max);
vout3 = math_min_s32(vout3, vmagic_max);
vout0 -= vmagic_bias_less_zero_point;
vout1 -= vmagic_bias_less_zero_point;
vout2 -= vmagic_bias_less_zero_point;
vout3 -= vmagic_bias_less_zero_point;
output[0] = (uint8_t) vout0;
output[1] = (uint8_t) vout1;
output[2] = (uint8_t) vout2;
output[3] = (uint8_t) vout3;
output += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32_t vacc = *b++;
const int32_t vi0 = (int32_t) (uint32_t) *i0++;
const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) (uint32_t) *i1++;
const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) (uint32_t) *i2++;
const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) (uint32_t) *i3++;
const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) (uint32_t) *i4++;
const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 5 * sizeof(uint8_t));
float vfpacc = (float) vacc * vscale;
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc);
vout = math_max_s32(vout, vmagic_min);
vout = math_min_s32(vout, vmagic_max);
vout -= vmagic_bias_less_zero_point;
*output++ = (uint8_t) vout;
} while (--c != 0);
}
}
input = (const uint8_t**) ((uintptr_t) input + input_stride);
output = (uint8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 23,135
| 41.844444
| 133
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-5f5m5l4c1s1r-minmax-fp32-scalar-lrintf.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// Multipass QU8 depthwise-convolution microkernel, scalar variant with fp32
// requantization and lrintf rounding. The name encodes the tiling:
//   5f = first pass consumes 5 kernel taps (and loads the per-channel bias),
//   5m = each middle pass consumes another 5 taps,
//   5l = last pass consumes the final (up to) 5 taps and requantizes,
//   4c1s1r = channels are processed 4 at a time with a 1-channel remainder loop.
// Intermediate int32 accumulators are staged per-channel in `buffer` between
// passes. Weights layout per 4-channel group in the first pass: 4 x int32 bias
// followed by 5 taps x 4 channels of uint8; middle/last passes read 20 uint8
// taps per group (no bias). `kernel_size` must exceed 5 (asserted below) so the
// first pass plus at least one more pass always runs.
void xnn_qu8_dwconv_minmax_fp32_ukernel_5f5m5l4c1s1r__scalar_lrintf(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);
  assert(kernel_size > 5);

  // Requantization parameters: the int32 accumulator is scaled to float,
  // clamped against the output range (pre-shifted by the output zero point),
  // rounded with lrintf, then the output zero point is added back.
  const float vscale = params->fp32_scalar_lrintf.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
  const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
  const int32_t vkernel_zero_point = params->fp32_scalar_lrintf.kernel_zero_point;
  do {
    const void* w = weights;

    // First pass to process 5 inputs.
    {
      int32_t* b = buffer;
      // Rows pointing at the shared `zero` buffer are padding and must not be
      // offset; real rows are shifted by `input_offset`.
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      input += 5;

      size_t c = channels;
      for (; c >= 4; c -= 4) {
        // Seed the accumulators with the 4 per-channel int32 biases that lead
        // each weights group.
        int32_t vacc0 = ((const int32_t*) w)[0];
        int32_t vacc1 = ((const int32_t*) w)[1];
        int32_t vacc2 = ((const int32_t*) w)[2];
        int32_t vacc3 = ((const int32_t*) w)[3];

        const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
        const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
        const int32_t vi0x2 = (int32_t) (uint32_t) i0[2];
        const int32_t vi0x3 = (int32_t) (uint32_t) i0[3];
        i0 += 4;

        // Kernel taps follow the 4 int32 biases; subtract the kernel zero
        // point to recenter the unsigned weights.
        const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[0] - vkernel_zero_point;
        const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[1] - vkernel_zero_point;
        const int32_t vk0x2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[2] - vkernel_zero_point;
        const int32_t vk0x3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[3] - vkernel_zero_point;

        vacc0 += vi0x0 * vk0x0;
        vacc1 += vi0x1 * vk0x1;
        vacc2 += vi0x2 * vk0x2;
        vacc3 += vi0x3 * vk0x3;
        const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
        const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
        const int32_t vi1x2 = (int32_t) (uint32_t) i1[2];
        const int32_t vi1x3 = (int32_t) (uint32_t) i1[3];
        i1 += 4;

        const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[4] - vkernel_zero_point;
        const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[5] - vkernel_zero_point;
        const int32_t vk1x2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[6] - vkernel_zero_point;
        const int32_t vk1x3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[7] - vkernel_zero_point;

        vacc0 += vi1x0 * vk1x0;
        vacc1 += vi1x1 * vk1x1;
        vacc2 += vi1x2 * vk1x2;
        vacc3 += vi1x3 * vk1x3;
        const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
        const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
        const int32_t vi2x2 = (int32_t) (uint32_t) i2[2];
        const int32_t vi2x3 = (int32_t) (uint32_t) i2[3];
        i2 += 4;

        const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[8] - vkernel_zero_point;
        const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[9] - vkernel_zero_point;
        const int32_t vk2x2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[10] - vkernel_zero_point;
        const int32_t vk2x3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[11] - vkernel_zero_point;

        vacc0 += vi2x0 * vk2x0;
        vacc1 += vi2x1 * vk2x1;
        vacc2 += vi2x2 * vk2x2;
        vacc3 += vi2x3 * vk2x3;
        const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
        const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
        const int32_t vi3x2 = (int32_t) (uint32_t) i3[2];
        const int32_t vi3x3 = (int32_t) (uint32_t) i3[3];
        i3 += 4;

        const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[12] - vkernel_zero_point;
        const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[13] - vkernel_zero_point;
        const int32_t vk3x2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[14] - vkernel_zero_point;
        const int32_t vk3x3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[15] - vkernel_zero_point;

        vacc0 += vi3x0 * vk3x0;
        vacc1 += vi3x1 * vk3x1;
        vacc2 += vi3x2 * vk3x2;
        vacc3 += vi3x3 * vk3x3;
        const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
        const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
        const int32_t vi4x2 = (int32_t) (uint32_t) i4[2];
        const int32_t vi4x3 = (int32_t) (uint32_t) i4[3];
        i4 += 4;

        const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[16] - vkernel_zero_point;
        const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[17] - vkernel_zero_point;
        const int32_t vk4x2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[18] - vkernel_zero_point;
        const int32_t vk4x3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[19] - vkernel_zero_point;

        vacc0 += vi4x0 * vk4x0;
        vacc1 += vi4x1 * vk4x1;
        vacc2 += vi4x2 * vk4x2;
        vacc3 += vi4x3 * vk4x3;

        // Advance past 4 int32 biases + 5 taps x 4 channels of uint8.
        w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t) + 20 * sizeof(uint8_t));

        // Stage partial sums in the scratch buffer for the next pass.
        b[0] = vacc0;
        b[1] = vacc1;
        b[2] = vacc2;
        b[3] = vacc3;
        b += 4;
      }
      // Remainder loop: one channel at a time (group is 1 bias + 5 taps).
      if XNN_UNLIKELY(c != 0) {
        do {
          int32_t vacc = *((const int32_t*) w);

          const int32_t vi0 = (int32_t) (uint32_t) *i0++;
          const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0] - vkernel_zero_point;
          vacc += vi0 * vk0;
          const int32_t vi1 = (int32_t) (uint32_t) *i1++;
          const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1] - vkernel_zero_point;
          vacc += vi1 * vk1;
          const int32_t vi2 = (int32_t) (uint32_t) *i2++;
          const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2] - vkernel_zero_point;
          vacc += vi2 * vk2;
          const int32_t vi3 = (int32_t) (uint32_t) *i3++;
          const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3] - vkernel_zero_point;
          vacc += vi3 * vk3;
          const int32_t vi4 = (int32_t) (uint32_t) *i4++;
          const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4] - vkernel_zero_point;
          vacc += vi4 * vk4;
          w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 5 * sizeof(uint8_t));

          *b++ = vacc;
        } while (--c != 0);
      }
    }

    // Middle pass to process 5 inputs in each iteration.
    // Runs while more than 5 taps remain after this iteration; accumulators
    // are read from and written back to the scratch buffer.
    for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
      int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      input += 5;

      size_t c = channels;
      for (; c >= 4; c -= 4) {
        // Resume from the partial sums staged by the previous pass.
        int32_t vacc0 = b[0];
        int32_t vacc1 = b[1];
        int32_t vacc2 = b[2];
        int32_t vacc3 = b[3];

        const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
        const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
        const int32_t vi0x2 = (int32_t) (uint32_t) i0[2];
        const int32_t vi0x3 = (int32_t) (uint32_t) i0[3];
        i0 += 4;

        // No bias prefix in middle-pass weights: taps start at w directly.
        const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
        const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
        const int32_t vk0x2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
        const int32_t vk0x3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;

        vacc0 += vi0x0 * vk0x0;
        vacc1 += vi0x1 * vk0x1;
        vacc2 += vi0x2 * vk0x2;
        vacc3 += vi0x3 * vk0x3;
        const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
        const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
        const int32_t vi1x2 = (int32_t) (uint32_t) i1[2];
        const int32_t vi1x3 = (int32_t) (uint32_t) i1[3];
        i1 += 4;

        const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
        const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
        const int32_t vk1x2 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
        const int32_t vk1x3 = (int32_t) (uint32_t) ((const uint8_t*) w)[7] - vkernel_zero_point;

        vacc0 += vi1x0 * vk1x0;
        vacc1 += vi1x1 * vk1x1;
        vacc2 += vi1x2 * vk1x2;
        vacc3 += vi1x3 * vk1x3;
        const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
        const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
        const int32_t vi2x2 = (int32_t) (uint32_t) i2[2];
        const int32_t vi2x3 = (int32_t) (uint32_t) i2[3];
        i2 += 4;

        const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[8] - vkernel_zero_point;
        const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[9] - vkernel_zero_point;
        const int32_t vk2x2 = (int32_t) (uint32_t) ((const uint8_t*) w)[10] - vkernel_zero_point;
        const int32_t vk2x3 = (int32_t) (uint32_t) ((const uint8_t*) w)[11] - vkernel_zero_point;

        vacc0 += vi2x0 * vk2x0;
        vacc1 += vi2x1 * vk2x1;
        vacc2 += vi2x2 * vk2x2;
        vacc3 += vi2x3 * vk2x3;
        const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
        const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
        const int32_t vi3x2 = (int32_t) (uint32_t) i3[2];
        const int32_t vi3x3 = (int32_t) (uint32_t) i3[3];
        i3 += 4;

        const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[12] - vkernel_zero_point;
        const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[13] - vkernel_zero_point;
        const int32_t vk3x2 = (int32_t) (uint32_t) ((const uint8_t*) w)[14] - vkernel_zero_point;
        const int32_t vk3x3 = (int32_t) (uint32_t) ((const uint8_t*) w)[15] - vkernel_zero_point;

        vacc0 += vi3x0 * vk3x0;
        vacc1 += vi3x1 * vk3x1;
        vacc2 += vi3x2 * vk3x2;
        vacc3 += vi3x3 * vk3x3;
        const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
        const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
        const int32_t vi4x2 = (int32_t) (uint32_t) i4[2];
        const int32_t vi4x3 = (int32_t) (uint32_t) i4[3];
        i4 += 4;

        const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[16] - vkernel_zero_point;
        const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[17] - vkernel_zero_point;
        const int32_t vk4x2 = (int32_t) (uint32_t) ((const uint8_t*) w)[18] - vkernel_zero_point;
        const int32_t vk4x3 = (int32_t) (uint32_t) ((const uint8_t*) w)[19] - vkernel_zero_point;

        vacc0 += vi4x0 * vk4x0;
        vacc1 += vi4x1 * vk4x1;
        vacc2 += vi4x2 * vk4x2;
        vacc3 += vi4x3 * vk4x3;

        w = (const void*) ((uintptr_t) w + 20 * sizeof(uint8_t));

        b[0] = vacc0;
        b[1] = vacc1;
        b[2] = vacc2;
        b[3] = vacc3;
        b += 4;
      }
      // Remainder loop: one channel at a time (5 taps per channel, no bias).
      if XNN_UNLIKELY(c != 0) {
        do {
          int32_t vacc = *b;

          const int32_t vi0 = (int32_t) (uint32_t) *i0++;
          const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
          vacc += vi0 * vk0;
          const int32_t vi1 = (int32_t) (uint32_t) *i1++;
          const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
          vacc += vi1 * vk1;
          const int32_t vi2 = (int32_t) (uint32_t) *i2++;
          const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
          vacc += vi2 * vk2;
          const int32_t vi3 = (int32_t) (uint32_t) *i3++;
          const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
          vacc += vi3 * vk3;
          const int32_t vi4 = (int32_t) (uint32_t) *i4++;
          const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
          vacc += vi4 * vk4;
          w = (const void*) ((uintptr_t) w + 5 * sizeof(uint8_t));

          *b++ = vacc;
        } while (--c != 0);
      }
    }

    // Last pass to process up to 5 inputs.
    // Consumes the final taps, then requantizes and writes uint8 outputs.
    {
      const int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }

      size_t c = channels;
      for (; c >= 4; c -= 4) {
        int32_t vacc0 = b[0];
        int32_t vacc1 = b[1];
        int32_t vacc2 = b[2];
        int32_t vacc3 = b[3];
        b += 4;

        const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
        const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
        const int32_t vi0x2 = (int32_t) (uint32_t) i0[2];
        const int32_t vi0x3 = (int32_t) (uint32_t) i0[3];
        i0 += 4;

        const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
        const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
        const int32_t vk0x2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
        const int32_t vk0x3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;

        vacc0 += vi0x0 * vk0x0;
        vacc1 += vi0x1 * vk0x1;
        vacc2 += vi0x2 * vk0x2;
        vacc3 += vi0x3 * vk0x3;
        const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
        const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
        const int32_t vi1x2 = (int32_t) (uint32_t) i1[2];
        const int32_t vi1x3 = (int32_t) (uint32_t) i1[3];
        i1 += 4;

        const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
        const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
        const int32_t vk1x2 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
        const int32_t vk1x3 = (int32_t) (uint32_t) ((const uint8_t*) w)[7] - vkernel_zero_point;

        vacc0 += vi1x0 * vk1x0;
        vacc1 += vi1x1 * vk1x1;
        vacc2 += vi1x2 * vk1x2;
        vacc3 += vi1x3 * vk1x3;
        const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
        const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
        const int32_t vi2x2 = (int32_t) (uint32_t) i2[2];
        const int32_t vi2x3 = (int32_t) (uint32_t) i2[3];
        i2 += 4;

        const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[8] - vkernel_zero_point;
        const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[9] - vkernel_zero_point;
        const int32_t vk2x2 = (int32_t) (uint32_t) ((const uint8_t*) w)[10] - vkernel_zero_point;
        const int32_t vk2x3 = (int32_t) (uint32_t) ((const uint8_t*) w)[11] - vkernel_zero_point;

        vacc0 += vi2x0 * vk2x0;
        vacc1 += vi2x1 * vk2x1;
        vacc2 += vi2x2 * vk2x2;
        vacc3 += vi2x3 * vk2x3;
        const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
        const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
        const int32_t vi3x2 = (int32_t) (uint32_t) i3[2];
        const int32_t vi3x3 = (int32_t) (uint32_t) i3[3];
        i3 += 4;

        const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[12] - vkernel_zero_point;
        const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[13] - vkernel_zero_point;
        const int32_t vk3x2 = (int32_t) (uint32_t) ((const uint8_t*) w)[14] - vkernel_zero_point;
        const int32_t vk3x3 = (int32_t) (uint32_t) ((const uint8_t*) w)[15] - vkernel_zero_point;

        vacc0 += vi3x0 * vk3x0;
        vacc1 += vi3x1 * vk3x1;
        vacc2 += vi3x2 * vk3x2;
        vacc3 += vi3x3 * vk3x3;
        const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
        const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
        const int32_t vi4x2 = (int32_t) (uint32_t) i4[2];
        const int32_t vi4x3 = (int32_t) (uint32_t) i4[3];
        i4 += 4;

        const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[16] - vkernel_zero_point;
        const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[17] - vkernel_zero_point;
        const int32_t vk4x2 = (int32_t) (uint32_t) ((const uint8_t*) w)[18] - vkernel_zero_point;
        const int32_t vk4x3 = (int32_t) (uint32_t) ((const uint8_t*) w)[19] - vkernel_zero_point;

        vacc0 += vi4x0 * vk4x0;
        vacc1 += vi4x1 * vk4x1;
        vacc2 += vi4x2 * vk4x2;
        vacc3 += vi4x3 * vk4x3;

        w = (const void*) ((uintptr_t) w + 20 * sizeof(uint8_t));

        // Requantize: scale to float, clamp (range pre-shifted by the output
        // zero point), round with lrintf, re-add the zero point, narrow.
        float vfpacc0 = (float) vacc0;
        float vfpacc1 = (float) vacc1;
        float vfpacc2 = (float) vacc2;
        float vfpacc3 = (float) vacc3;

        vfpacc0 *= vscale;
        vfpacc1 *= vscale;
        vfpacc2 *= vscale;
        vfpacc3 *= vscale;

        vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
        vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
        vfpacc2 = math_max_f32(vfpacc2, voutput_min_less_zero_point);
        vfpacc3 = math_max_f32(vfpacc3, voutput_min_less_zero_point);

        vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
        vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
        vfpacc2 = math_min_f32(vfpacc2, voutput_max_less_zero_point);
        vfpacc3 = math_min_f32(vfpacc3, voutput_max_less_zero_point);

        const int32_t vrndacc0 = (int32_t) lrintf(vfpacc0);
        const int32_t vrndacc1 = (int32_t) lrintf(vfpacc1);
        const int32_t vrndacc2 = (int32_t) lrintf(vfpacc2);
        const int32_t vrndacc3 = (int32_t) lrintf(vfpacc3);

        int32_t vout0 = (int32_t) vrndacc0 + voutput_zero_point;
        int32_t vout1 = (int32_t) vrndacc1 + voutput_zero_point;
        int32_t vout2 = (int32_t) vrndacc2 + voutput_zero_point;
        int32_t vout3 = (int32_t) vrndacc3 + voutput_zero_point;

        output[0] = (uint8_t) vout0;
        output[1] = (uint8_t) vout1;
        output[2] = (uint8_t) vout2;
        output[3] = (uint8_t) vout3;
        output += 4;
      }
      // Remainder loop: finish and requantize one channel at a time.
      if XNN_UNLIKELY(c != 0) {
        do {
          int32_t vacc = *b++;
          const int32_t vi0 = (int32_t) (uint32_t) *i0++;
          const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
          vacc += vi0 * vk0;
          const int32_t vi1 = (int32_t) (uint32_t) *i1++;
          const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
          vacc += vi1 * vk1;
          const int32_t vi2 = (int32_t) (uint32_t) *i2++;
          const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
          vacc += vi2 * vk2;
          const int32_t vi3 = (int32_t) (uint32_t) *i3++;
          const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
          vacc += vi3 * vk3;
          const int32_t vi4 = (int32_t) (uint32_t) *i4++;
          const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
          vacc += vi4 * vk4;
          w = (const void*) ((uintptr_t) w + 5 * sizeof(uint8_t));

          float vfpacc = (float) vacc * vscale;
          vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
          vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
          const int32_t vrndacc = (int32_t) lrintf(vfpacc);
          int32_t vout = vrndacc + voutput_zero_point;

          *output++ = (uint8_t) vout;
        } while (--c != 0);
      }
    }

    // Advance to the next output pixel's row-pointer set and output position.
    input = (const uint8_t**) ((uintptr_t) input + input_stride);
    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 23,264
| 42.567416
| 133
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-5f5m5l4c1s1r-minmax-fp32-wasm-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_dwconv_minmax_fp32_ukernel_5f5m5l4c1s1r__wasm_fmagic(
size_t channels,
size_t output_width,
const uint8_t** input,
const void* weights,
uint8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const uint8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const float vscale = params->fp32_scalar_fmagic.scale;
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
const int32_t vkernel_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
do {
const void* w = weights;
// First pass to process 5 inputs.
{
int32_t* b = buffer;
const uint8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
for (; c >= 4; c -= 4) {
int32_t vacc0 = ((const int32_t*) w)[0];
int32_t vacc1 = ((const int32_t*) w)[1];
int32_t vacc2 = ((const int32_t*) w)[2];
int32_t vacc3 = ((const int32_t*) w)[3];
const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
const int32_t vi0x2 = (int32_t) (uint32_t) i0[2];
const int32_t vi0x3 = (int32_t) (uint32_t) i0[3];
i0 += 4;
const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[0] - vkernel_zero_point;
const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[1] - vkernel_zero_point;
const int32_t vk0x2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[2] - vkernel_zero_point;
const int32_t vk0x3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[3] - vkernel_zero_point;
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
const int32_t vi1x2 = (int32_t) (uint32_t) i1[2];
const int32_t vi1x3 = (int32_t) (uint32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[4] - vkernel_zero_point;
const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[5] - vkernel_zero_point;
const int32_t vk1x2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[6] - vkernel_zero_point;
const int32_t vk1x3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[7] - vkernel_zero_point;
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
const int32_t vi2x2 = (int32_t) (uint32_t) i2[2];
const int32_t vi2x3 = (int32_t) (uint32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[8] - vkernel_zero_point;
const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[9] - vkernel_zero_point;
const int32_t vk2x2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[10] - vkernel_zero_point;
const int32_t vk2x3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[11] - vkernel_zero_point;
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
const int32_t vi3x2 = (int32_t) (uint32_t) i3[2];
const int32_t vi3x3 = (int32_t) (uint32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[12] - vkernel_zero_point;
const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[13] - vkernel_zero_point;
const int32_t vk3x2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[14] - vkernel_zero_point;
const int32_t vk3x3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[15] - vkernel_zero_point;
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
const int32_t vi4x2 = (int32_t) (uint32_t) i4[2];
const int32_t vi4x3 = (int32_t) (uint32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[16] - vkernel_zero_point;
const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[17] - vkernel_zero_point;
const int32_t vk4x2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[18] - vkernel_zero_point;
const int32_t vk4x3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[19] - vkernel_zero_point;
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t) + 20 * sizeof(uint8_t));
b[0] = vacc0;
b[1] = vacc1;
b[2] = vacc2;
b[3] = vacc3;
b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32_t vacc = *((const int32_t*) w);
const int32_t vi0 = (int32_t) (uint32_t) *i0++;
const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0] - vkernel_zero_point;
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) (uint32_t) *i1++;
const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1] - vkernel_zero_point;
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) (uint32_t) *i2++;
const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2] - vkernel_zero_point;
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) (uint32_t) *i3++;
const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3] - vkernel_zero_point;
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) (uint32_t) *i4++;
const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4] - vkernel_zero_point;
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 5 * sizeof(uint8_t));
*b++ = vacc;
} while (--c != 0);
}
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
int32_t* b = buffer;
const uint8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = channels;
for (; c >= 4; c -= 4) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
int32_t vacc2 = b[2];
int32_t vacc3 = b[3];
const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
const int32_t vi0x2 = (int32_t) (uint32_t) i0[2];
const int32_t vi0x3 = (int32_t) (uint32_t) i0[3];
i0 += 4;
const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
const int32_t vk0x2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
const int32_t vk0x3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
const int32_t vi1x2 = (int32_t) (uint32_t) i1[2];
const int32_t vi1x3 = (int32_t) (uint32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
const int32_t vk1x2 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
const int32_t vk1x3 = (int32_t) (uint32_t) ((const uint8_t*) w)[7] - vkernel_zero_point;
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
const int32_t vi2x2 = (int32_t) (uint32_t) i2[2];
const int32_t vi2x3 = (int32_t) (uint32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[8] - vkernel_zero_point;
const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[9] - vkernel_zero_point;
const int32_t vk2x2 = (int32_t) (uint32_t) ((const uint8_t*) w)[10] - vkernel_zero_point;
const int32_t vk2x3 = (int32_t) (uint32_t) ((const uint8_t*) w)[11] - vkernel_zero_point;
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
const int32_t vi3x2 = (int32_t) (uint32_t) i3[2];
const int32_t vi3x3 = (int32_t) (uint32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[12] - vkernel_zero_point;
const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[13] - vkernel_zero_point;
const int32_t vk3x2 = (int32_t) (uint32_t) ((const uint8_t*) w)[14] - vkernel_zero_point;
const int32_t vk3x3 = (int32_t) (uint32_t) ((const uint8_t*) w)[15] - vkernel_zero_point;
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
const int32_t vi4x2 = (int32_t) (uint32_t) i4[2];
const int32_t vi4x3 = (int32_t) (uint32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[16] - vkernel_zero_point;
const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[17] - vkernel_zero_point;
const int32_t vk4x2 = (int32_t) (uint32_t) ((const uint8_t*) w)[18] - vkernel_zero_point;
const int32_t vk4x3 = (int32_t) (uint32_t) ((const uint8_t*) w)[19] - vkernel_zero_point;
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
w = (const void*) ((uintptr_t) w + 20 * sizeof(uint8_t));
b[0] = vacc0;
b[1] = vacc1;
b[2] = vacc2;
b[3] = vacc3;
b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32_t vacc = *b;
const int32_t vi0 = (int32_t) (uint32_t) *i0++;
const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) (uint32_t) *i1++;
const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) (uint32_t) *i2++;
const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) (uint32_t) *i3++;
const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) (uint32_t) *i4++;
const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 5 * sizeof(uint8_t));
*b++ = vacc;
} while (--c != 0);
}
}
// Last pass to process up to 5 inputs.
{
const int32_t* b = buffer;
const uint8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 4; c -= 4) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
int32_t vacc2 = b[2];
int32_t vacc3 = b[3];
b += 4;
const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
const int32_t vi0x2 = (int32_t) (uint32_t) i0[2];
const int32_t vi0x3 = (int32_t) (uint32_t) i0[3];
i0 += 4;
const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
const int32_t vk0x2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
const int32_t vk0x3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
const int32_t vi1x2 = (int32_t) (uint32_t) i1[2];
const int32_t vi1x3 = (int32_t) (uint32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
const int32_t vk1x2 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
const int32_t vk1x3 = (int32_t) (uint32_t) ((const uint8_t*) w)[7] - vkernel_zero_point;
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
const int32_t vi2x2 = (int32_t) (uint32_t) i2[2];
const int32_t vi2x3 = (int32_t) (uint32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[8] - vkernel_zero_point;
const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[9] - vkernel_zero_point;
const int32_t vk2x2 = (int32_t) (uint32_t) ((const uint8_t*) w)[10] - vkernel_zero_point;
const int32_t vk2x3 = (int32_t) (uint32_t) ((const uint8_t*) w)[11] - vkernel_zero_point;
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
const int32_t vi3x2 = (int32_t) (uint32_t) i3[2];
const int32_t vi3x3 = (int32_t) (uint32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[12] - vkernel_zero_point;
const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[13] - vkernel_zero_point;
const int32_t vk3x2 = (int32_t) (uint32_t) ((const uint8_t*) w)[14] - vkernel_zero_point;
const int32_t vk3x3 = (int32_t) (uint32_t) ((const uint8_t*) w)[15] - vkernel_zero_point;
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
const int32_t vi4x2 = (int32_t) (uint32_t) i4[2];
const int32_t vi4x3 = (int32_t) (uint32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[16] - vkernel_zero_point;
const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[17] - vkernel_zero_point;
const int32_t vk4x2 = (int32_t) (uint32_t) ((const uint8_t*) w)[18] - vkernel_zero_point;
const int32_t vk4x3 = (int32_t) (uint32_t) ((const uint8_t*) w)[19] - vkernel_zero_point;
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
w = (const void*) ((uintptr_t) w + 20 * sizeof(uint8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
float vfpacc2 = (float) vacc2;
float vfpacc3 = (float) vacc3;
vfpacc0 *= vscale;
vfpacc1 *= vscale;
vfpacc2 *= vscale;
vfpacc3 *= vscale;
vfpacc0 = __builtin_wasm_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = __builtin_wasm_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc2 = __builtin_wasm_max_f32(vfpacc2, voutput_min_less_zero_point);
vfpacc3 = __builtin_wasm_max_f32(vfpacc3, voutput_min_less_zero_point);
vfpacc0 = __builtin_wasm_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = __builtin_wasm_min_f32(vfpacc1, voutput_max_less_zero_point);
vfpacc2 = __builtin_wasm_min_f32(vfpacc2, voutput_max_less_zero_point);
vfpacc3 = __builtin_wasm_min_f32(vfpacc3, voutput_max_less_zero_point);
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
vfpacc2 += vmagic_bias;
vfpacc3 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
int32_t vout2 = (int32_t) float_as_uint32(vfpacc2) - vmagic_bias_less_output_zero_point;
int32_t vout3 = (int32_t) float_as_uint32(vfpacc3) - vmagic_bias_less_output_zero_point;
output[0] = (uint8_t) vout0;
output[1] = (uint8_t) vout1;
output[2] = (uint8_t) vout2;
output[3] = (uint8_t) vout3;
output += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
int32_t vacc = *b++;
const int32_t vi0 = (int32_t) (uint32_t) *i0++;
const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) (uint32_t) *i1++;
const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) (uint32_t) *i2++;
const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) (uint32_t) *i3++;
const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) (uint32_t) *i4++;
const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
vacc += vi4 * vk4;
w = (const void*) ((uintptr_t) w + 5 * sizeof(uint8_t));
float vfpacc = (float) vacc * vscale;
vfpacc = __builtin_wasm_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = __builtin_wasm_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (uint8_t) vout;
} while (--c != 0);
}
}
input = (const uint8_t**) ((uintptr_t) input + input_stride);
output = (uint8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 23,474
| 42.960674
| 133
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-5f5m5l8c8s8r-minmax-fp32-neon-mul16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-neon-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_qu8_dwconv_minmax_fp32_ukernel_5f5m5l8c8s8r__neon_mul16(
size_t channels,
size_t output_width,
const uint8_t** input,
const void* weights,
uint8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const uint8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const uint8x8_t vkernel_zero_point = vld1_dup_u8(params->fp32_neon.kernel_zero_point);
const float32x4_t vscale = vld1q_dup_f32(¶ms->fp32_neon.scale);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->fp32_neon.magic_bias);
const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(¶ms->fp32_neon.magic_bias_less_output_zero_point);
const uint8x8_t voutput_min = vld1_dup_u8(¶ms->fp32_neon.output_min);
const uint8x8_t voutput_max = vld1_dup_u8(¶ms->fp32_neon.output_max);
do {
const void* w = weights;
// First pass to process 5 inputs.
{
int32_t* b = buffer;
const uint8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
const int16x8_t vi0x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i0))); i0 += 8;
const int16x8_t vk0x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
const int16x8_t vi1x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i1))); i1 += 8;
const int16x8_t vk1x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
const int16x8_t vi2x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i2))); i2 += 8;
const int16x8_t vk2x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
const int16x8_t vi3x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i3))); i3 += 8;
const int16x8_t vk3x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
const int16x8_t vi4x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i4))); i4 += 8;
const int16x8_t vk4x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
assert(c == 0);
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
int32_t* b = buffer;
const uint8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
const int16x8_t vi0x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i0))); i0 += 8;
const int16x8_t vk0x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
const int16x8_t vi1x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i1))); i1 += 8;
const int16x8_t vk1x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
const int16x8_t vi2x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i2))); i2 += 8;
const int16x8_t vk2x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
const int16x8_t vi3x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i3))); i3 += 8;
const int16x8_t vk3x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
const int16x8_t vi4x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i4))); i4 += 8;
const int16x8_t vk4x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
assert(c == 0);
}
// Last pass to process up to 5 inputs.
{
const int32_t* b = buffer;
const uint8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int16x8_t vi0x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i0))); i0 += 8;
const int16x8_t vk0x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
const int16x8_t vi1x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i1))); i1 += 8;
const int16x8_t vk1x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
const int16x8_t vi2x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i2))); i2 += 8;
const int16x8_t vk2x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
const int16x8_t vi3x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i3))); i3 += 8;
const int16x8_t vk3x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
const int16x8_t vi4x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i4))); i4 += 8;
const int16x8_t vk4x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
#endif // !XNN_ARCH_ARM64
vout01234567 = vmax_u8(vout01234567, voutput_min);
vout01234567 = vmin_u8(vout01234567, voutput_max);
vst1_u8(output, vout01234567); output += 8;
}
if XNN_UNLIKELY(c != 0) {
{
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int16x8_t vi0x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i0)));
const int16x8_t vk0x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
const int16x8_t vi1x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i1)));
const int16x8_t vk1x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
const int16x8_t vi2x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i2)));
const int16x8_t vk2x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
const int16x8_t vi3x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i3)));
const int16x8_t vk3x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
const int16x8_t vi4x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i4)));
const int16x8_t vk4x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
vout01234567 = vmax_u8(vout01234567, voutput_min);
vout01234567 = vmin_u8(vout01234567, voutput_max);
if (c & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
vout01234567 = vext_u8(vout01234567, vout01234567, 4);
}
if (c & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
vout01234567 = vext_u8(vout01234567, vout01234567, 2);
}
if (c & 1) {
vst1_lane_u8(output, vout01234567, 0); output += 1;
}
}
}
}
input = (const uint8_t**) ((uintptr_t) input + input_stride);
output = (uint8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 16,486
| 44.046448
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-5f5m5l8c8s8r-minmax-fp32-neonv8-mul16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-neon-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
// Multipass QU8 depthwise convolution microkernel for kernels with more than
// 5 taps: a first pass of 5 taps seeds int32 accumulators from the per-channel
// biases stored at the head of `weights`, zero or more middle passes of 5 taps
// accumulate into `buffer`, and a last pass of up to 5 taps requantizes
// (fp32 scale, round-to-nearest-even via NEONv8 VCVTN) and stores the output.
// Processes 8 channels per inner-loop iteration; `buffer` holds channels
// rounded up to a multiple of 8, so the partial-channel tail is handled only
// in the last pass.
//
// channels          number of channels (> 0)
// output_width      number of output pixels (> 0)
// input             array of kernel_size input-row pointers per output pixel
// weights           packed [bias (8 x int32) | taps (kernel_size x 8 x uint8)]
//                   per 8-channel group
// input_stride      byte offset between consecutive output pixels' pointer sets
// input_offset      byte offset added to every non-`zero` input pointer
// zero              pointer to a zero row (never offset)
// buffer            int32 scratch, >= round_up_po2(channels, 8) elements
void xnn_qu8_dwconv_minmax_fp32_ukernel_5f5m5l8c8s8r__neonv8_mul16(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);
  assert(kernel_size > 5);
  const uint8x8_t vkernel_zero_point = vld1_dup_u8(params->fp32_neonv8.kernel_zero_point);
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
  const uint8x8_t voutput_min = vld1_dup_u8(&params->fp32_neonv8.output_min);
  const uint8x8_t voutput_max = vld1_dup_u8(&params->fp32_neonv8.output_max);
  do {
    const void* w = weights;
    // First pass to process 5 inputs.
    {
      int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      input += 5;
      size_t c = round_up_po2(channels, 8);
      for (; c >= 8; c -= 8) {
        // Seed accumulators with the per-channel biases.
        int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
        int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
        // For each tap: widen uint8 inputs to int16, subtract the kernel
        // zero point from the uint8 weights (result fits int16), and
        // multiply-accumulate into int32.
        const int16x8_t vi0x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i0))); i0 += 8;
        const int16x8_t vk0x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
        const int16x8_t vi1x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i1))); i1 += 8;
        const int16x8_t vk1x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
        const int16x8_t vi2x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i2))); i2 += 8;
        const int16x8_t vk2x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
        const int16x8_t vi3x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i3))); i3 += 8;
        const int16x8_t vk3x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
        const int16x8_t vi4x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i4))); i4 += 8;
        const int16x8_t vk4x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
        // Spill partial sums to the scratch buffer for the next pass.
        vst1q_s32(b, vacc0123); b += 4;
        vst1q_s32(b, vacc4567); b += 4;
      }
      assert(c == 0);
    }
    // Middle pass to process 5 inputs in each iteration.
    for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
      int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      input += 5;
      size_t c = round_up_po2(channels, 8);
      for (; c >= 8; c -= 8) {
        // Resume accumulation from the scratch buffer (read in place,
        // written back at the bottom of the loop).
        int32x4_t vacc0123 = vld1q_s32(b);
        int32x4_t vacc4567 = vld1q_s32(b + 4);
        const int16x8_t vi0x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i0))); i0 += 8;
        const int16x8_t vk0x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
        const int16x8_t vi1x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i1))); i1 += 8;
        const int16x8_t vk1x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
        const int16x8_t vi2x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i2))); i2 += 8;
        const int16x8_t vk2x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
        const int16x8_t vi3x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i3))); i3 += 8;
        const int16x8_t vk3x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
        const int16x8_t vi4x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i4))); i4 += 8;
        const int16x8_t vk4x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
        vst1q_s32(b, vacc0123); b += 4;
        vst1q_s32(b, vacc4567); b += 4;
      }
      assert(c == 0);
    }
    // Last pass to process up to 5 inputs.
    {
      const int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      size_t c = channels;
      for (; c >= 8; c -= 8) {
        int32x4_t vacc0123 = vld1q_s32(b); b += 4;
        int32x4_t vacc4567 = vld1q_s32(b); b += 4;
        const int16x8_t vi0x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i0))); i0 += 8;
        const int16x8_t vk0x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
        const int16x8_t vi1x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i1))); i1 += 8;
        const int16x8_t vk1x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
        const int16x8_t vi2x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i2))); i2 += 8;
        const int16x8_t vk2x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
        const int16x8_t vi3x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i3))); i3 += 8;
        const int16x8_t vk3x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
        const int16x8_t vi4x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i4))); i4 += 8;
        const int16x8_t vk4x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
        // Requantize: int32 -> fp32, scale, round-to-nearest-even back to
        // int32 (VCVTN), narrow to int16 with saturation, add output zero
        // point, narrow to uint8 with saturation, then clamp to [min, max].
        float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
        float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
        vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
        vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
        vacc0123 = vcvtnq_s32_f32(vfpacc0123);
        vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
        vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
        uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
#else  // !XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
        vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
        uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
#endif  // !XNN_ARCH_ARM64
        vout01234567 = vmax_u8(vout01234567, voutput_min);
        vout01234567 = vmin_u8(vout01234567, voutput_max);
        vst1_u8(output, vout01234567); output += 8;
      }
      if XNN_UNLIKELY(c != 0) {
        // Remainder: 1-7 trailing channels. The buffer and packed weights are
        // padded to a multiple of 8 channels, so a full 8-wide computation is
        // safe; only the store below is partial.
        {
          int32x4_t vacc0123 = vld1q_s32(b); b += 4;
          int32x4_t vacc4567 = vld1q_s32(b); b += 4;
          const int16x8_t vi0x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i0)));
          // Note: weights are uint8 in QU8 kernels; advance w with a uint8_t
          // cast for consistency with the main loop (was int8_t, same stride).
          const int16x8_t vk0x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
          const int16x8_t vi1x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i1)));
          const int16x8_t vk1x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
          const int16x8_t vi2x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i2)));
          const int16x8_t vk2x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
          const int16x8_t vi3x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i3)));
          const int16x8_t vk3x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
          const int16x8_t vi4x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i4)));
          const int16x8_t vk4x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
          float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
          float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
          vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
          vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
          vacc0123 = vcvtnq_s32_f32(vfpacc0123);
          vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
          int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
          int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
          vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
          uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
          vout01234567 = vmax_u8(vout01234567, voutput_min);
          vout01234567 = vmin_u8(vout01234567, voutput_max);
          // Partial store: emit 4, 2, then 1 byte(s) as needed, rotating the
          // vector so lane 0 always holds the next unstored channel.
          if (c & 4) {
            vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
            vout01234567 = vext_u8(vout01234567, vout01234567, 4);
          }
          if (c & 2) {
            vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
            vout01234567 = vext_u8(vout01234567, vout01234567, 2);
          }
          if (c & 1) {
            vst1_lane_u8(output, vout01234567, 0); output += 1;
          }
        }
      }
    }
    input = (const uint8_t**) ((uintptr_t) input + input_stride);
    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 16,198
| 43.625344
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-5f5m5l8c8s8r-minmax-fp32-sse2-mul16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-sse-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_dwconv_minmax_fp32_ukernel_5f5m5l8c8s8r__sse2_mul16(
size_t channels,
size_t output_width,
const uint8_t** input,
const void* weights,
uint8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const uint8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 5);
const __m128i vk_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
do {
const void* w = weights;
// First pass to process 5 inputs.
{
int32_t* b = buffer;
const uint8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
__m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(uint8_t)));
i0 += 8;
const __m128i vzero = _mm_setzero_si128();
const __m128i vxi0x01234567 = _mm_unpacklo_epi8(vi0x01234567, vzero);
const __m128i vxk0x01234567 = _mm_sub_epi16(_mm_unpacklo_epi8(vk0x01234567, vzero), vk_zero_point);
const __m128i vprod0x01234567lo = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
const __m128i vprod0x01234567hi = _mm_mulhi_epi16(vxi0x01234567, vxk0x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod0x01234567lo, vprod0x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod0x01234567lo, vprod0x01234567hi));
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(uint8_t)));
i1 += 8;
const __m128i vxi1x01234567 = _mm_unpacklo_epi8(vi1x01234567, vzero);
const __m128i vxk1x01234567 = _mm_sub_epi16(_mm_unpacklo_epi8(vk1x01234567, vzero), vk_zero_point);
const __m128i vprod1x01234567lo = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
const __m128i vprod1x01234567hi = _mm_mulhi_epi16(vxi1x01234567, vxk1x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod1x01234567lo, vprod1x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod1x01234567lo, vprod1x01234567hi));
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(uint8_t)));
i2 += 8;
const __m128i vxi2x01234567 = _mm_unpacklo_epi8(vi2x01234567, vzero);
const __m128i vxk2x01234567 = _mm_sub_epi16(_mm_unpacklo_epi8(vk2x01234567, vzero), vk_zero_point);
const __m128i vprod2x01234567lo = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
const __m128i vprod2x01234567hi = _mm_mulhi_epi16(vxi2x01234567, vxk2x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod2x01234567lo, vprod2x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod2x01234567lo, vprod2x01234567hi));
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(uint8_t)));
i3 += 8;
const __m128i vxi3x01234567 = _mm_unpacklo_epi8(vi3x01234567, vzero);
const __m128i vxk3x01234567 = _mm_sub_epi16(_mm_unpacklo_epi8(vk3x01234567, vzero), vk_zero_point);
const __m128i vprod3x01234567lo = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
const __m128i vprod3x01234567hi = _mm_mulhi_epi16(vxi3x01234567, vxk3x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod3x01234567lo, vprod3x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod3x01234567lo, vprod3x01234567hi));
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(uint8_t)));
i4 += 8;
const __m128i vxi4x01234567 = _mm_unpacklo_epi8(vi4x01234567, vzero);
const __m128i vxk4x01234567 = _mm_sub_epi16(_mm_unpacklo_epi8(vk4x01234567, vzero), vk_zero_point);
const __m128i vprod4x01234567lo = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
const __m128i vprod4x01234567hi = _mm_mulhi_epi16(vxi4x01234567, vxk4x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod4x01234567lo, vprod4x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod4x01234567lo, vprod4x01234567hi));
w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(uint8_t));
_mm_storeu_si128((__m128i*) b, vacc0123);
_mm_storeu_si128((__m128i*) (b + 4), vacc4567);
b += 8;
}
assert(c == 0);
}
// Middle pass to process 5 inputs in each iteration.
for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
int32_t* b = buffer;
const uint8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
input += 5;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
__m128i vacc0123 = _mm_loadu_si128((const __m128i*) b);
__m128i vacc4567 = _mm_loadu_si128((const __m128i*) (b + 4));
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 0 * sizeof(uint8_t)));
i0 += 8;
const __m128i vzero = _mm_setzero_si128();
const __m128i vxi0x01234567 = _mm_unpacklo_epi8(vi0x01234567, vzero);
const __m128i vxk0x01234567 = _mm_sub_epi16(_mm_unpacklo_epi8(vk0x01234567, vzero), vk_zero_point);
const __m128i vprod0x01234567lo = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
const __m128i vprod0x01234567hi = _mm_mulhi_epi16(vxi0x01234567, vxk0x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod0x01234567lo, vprod0x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod0x01234567lo, vprod0x01234567hi));
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(uint8_t)));
i1 += 8;
const __m128i vxi1x01234567 = _mm_unpacklo_epi8(vi1x01234567, vzero);
const __m128i vxk1x01234567 = _mm_sub_epi16(_mm_unpacklo_epi8(vk1x01234567, vzero), vk_zero_point);
const __m128i vprod1x01234567lo = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
const __m128i vprod1x01234567hi = _mm_mulhi_epi16(vxi1x01234567, vxk1x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod1x01234567lo, vprod1x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod1x01234567lo, vprod1x01234567hi));
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(uint8_t)));
i2 += 8;
const __m128i vxi2x01234567 = _mm_unpacklo_epi8(vi2x01234567, vzero);
const __m128i vxk2x01234567 = _mm_sub_epi16(_mm_unpacklo_epi8(vk2x01234567, vzero), vk_zero_point);
const __m128i vprod2x01234567lo = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
const __m128i vprod2x01234567hi = _mm_mulhi_epi16(vxi2x01234567, vxk2x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod2x01234567lo, vprod2x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod2x01234567lo, vprod2x01234567hi));
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(uint8_t)));
i3 += 8;
const __m128i vxi3x01234567 = _mm_unpacklo_epi8(vi3x01234567, vzero);
const __m128i vxk3x01234567 = _mm_sub_epi16(_mm_unpacklo_epi8(vk3x01234567, vzero), vk_zero_point);
const __m128i vprod3x01234567lo = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
const __m128i vprod3x01234567hi = _mm_mulhi_epi16(vxi3x01234567, vxk3x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod3x01234567lo, vprod3x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod3x01234567lo, vprod3x01234567hi));
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(uint8_t)));
i4 += 8;
const __m128i vxi4x01234567 = _mm_unpacklo_epi8(vi4x01234567, vzero);
const __m128i vxk4x01234567 = _mm_sub_epi16(_mm_unpacklo_epi8(vk4x01234567, vzero), vk_zero_point);
const __m128i vprod4x01234567lo = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
const __m128i vprod4x01234567hi = _mm_mulhi_epi16(vxi4x01234567, vxk4x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod4x01234567lo, vprod4x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod4x01234567lo, vprod4x01234567hi));
w = (const void*) ((uintptr_t) w + 40 * sizeof(uint8_t));
_mm_storeu_si128((__m128i*) (b), vacc0123);
_mm_storeu_si128((__m128i*) (b + 4), vacc4567);
b += 8;
}
assert(c == 0);
}
// Last pass to process up to 5 inputs.
{
const int32_t* b = buffer;
const uint8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
__m128i vacc0123 = _mm_loadu_si128((const __m128i*) b);
__m128i vacc4567 = _mm_loadu_si128((const __m128i*) (b + 4));
b += 8;
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 0 * sizeof(uint8_t)));
i0 += 8;
const __m128i vzero = _mm_setzero_si128();
const __m128i vxi0x01234567 = _mm_unpacklo_epi8(vi0x01234567, vzero);
const __m128i vxk0x01234567 = _mm_sub_epi16(_mm_unpacklo_epi8(vk0x01234567, vzero), vk_zero_point);
const __m128i vprod0x01234567lo = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
const __m128i vprod0x01234567hi = _mm_mulhi_epi16(vxi0x01234567, vxk0x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod0x01234567lo, vprod0x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod0x01234567lo, vprod0x01234567hi));
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(uint8_t)));
i1 += 8;
const __m128i vxi1x01234567 = _mm_unpacklo_epi8(vi1x01234567, vzero);
const __m128i vxk1x01234567 = _mm_sub_epi16(_mm_unpacklo_epi8(vk1x01234567, vzero), vk_zero_point);
const __m128i vprod1x01234567lo = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
const __m128i vprod1x01234567hi = _mm_mulhi_epi16(vxi1x01234567, vxk1x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod1x01234567lo, vprod1x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod1x01234567lo, vprod1x01234567hi));
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(uint8_t)));
i2 += 8;
const __m128i vxi2x01234567 = _mm_unpacklo_epi8(vi2x01234567, vzero);
const __m128i vxk2x01234567 = _mm_sub_epi16(_mm_unpacklo_epi8(vk2x01234567, vzero), vk_zero_point);
const __m128i vprod2x01234567lo = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
const __m128i vprod2x01234567hi = _mm_mulhi_epi16(vxi2x01234567, vxk2x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod2x01234567lo, vprod2x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod2x01234567lo, vprod2x01234567hi));
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(uint8_t)));
i3 += 8;
const __m128i vxi3x01234567 = _mm_unpacklo_epi8(vi3x01234567, vzero);
const __m128i vxk3x01234567 = _mm_sub_epi16(_mm_unpacklo_epi8(vk3x01234567, vzero), vk_zero_point);
const __m128i vprod3x01234567lo = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
const __m128i vprod3x01234567hi = _mm_mulhi_epi16(vxi3x01234567, vxk3x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod3x01234567lo, vprod3x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod3x01234567lo, vprod3x01234567hi));
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(uint8_t)));
i4 += 8;
const __m128i vxi4x01234567 = _mm_unpacklo_epi8(vi4x01234567, vzero);
const __m128i vxk4x01234567 = _mm_sub_epi16(_mm_unpacklo_epi8(vk4x01234567, vzero), vk_zero_point);
const __m128i vprod4x01234567lo = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
const __m128i vprod4x01234567hi = _mm_mulhi_epi16(vxi4x01234567, vxk4x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod4x01234567lo, vprod4x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod4x01234567lo, vprod4x01234567hi));
w = (const void*) ((uintptr_t) w + 40 * sizeof(uint8_t));
__m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
vscaled4567 = _mm_mul_ps(vscaled4567, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vscaled0123);
vacc4567 = _mm_cvtps_epi32(vscaled4567);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
}
if XNN_UNLIKELY(c != 0) {
{
__m128i vacc0123 = _mm_loadu_si128((const __m128i*) b);
__m128i vacc4567 = _mm_loadu_si128((const __m128i*) (b + 4));
b += 8;
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 0 * sizeof(uint8_t)));
const __m128i vzero = _mm_setzero_si128();
const __m128i vxi0x01234567 = _mm_unpacklo_epi8(vi0x01234567, vzero);
const __m128i vxk0x01234567 = _mm_sub_epi16(_mm_unpacklo_epi8(vk0x01234567, vzero), vk_zero_point);
const __m128i vprod0x01234567lo = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
const __m128i vprod0x01234567hi = _mm_mulhi_epi16(vxi0x01234567, vxk0x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod0x01234567lo, vprod0x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod0x01234567lo, vprod0x01234567hi));
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(uint8_t)));
const __m128i vxi1x01234567 = _mm_unpacklo_epi8(vi1x01234567, vzero);
const __m128i vxk1x01234567 = _mm_sub_epi16(_mm_unpacklo_epi8(vk1x01234567, vzero), vk_zero_point);
const __m128i vprod1x01234567lo = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
const __m128i vprod1x01234567hi = _mm_mulhi_epi16(vxi1x01234567, vxk1x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod1x01234567lo, vprod1x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod1x01234567lo, vprod1x01234567hi));
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(uint8_t)));
const __m128i vxi2x01234567 = _mm_unpacklo_epi8(vi2x01234567, vzero);
const __m128i vxk2x01234567 = _mm_sub_epi16(_mm_unpacklo_epi8(vk2x01234567, vzero), vk_zero_point);
const __m128i vprod2x01234567lo = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
const __m128i vprod2x01234567hi = _mm_mulhi_epi16(vxi2x01234567, vxk2x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod2x01234567lo, vprod2x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod2x01234567lo, vprod2x01234567hi));
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(uint8_t)));
const __m128i vxi3x01234567 = _mm_unpacklo_epi8(vi3x01234567, vzero);
const __m128i vxk3x01234567 = _mm_sub_epi16(_mm_unpacklo_epi8(vk3x01234567, vzero), vk_zero_point);
const __m128i vprod3x01234567lo = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
const __m128i vprod3x01234567hi = _mm_mulhi_epi16(vxi3x01234567, vxk3x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod3x01234567lo, vprod3x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod3x01234567lo, vprod3x01234567hi));
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(uint8_t)));
const __m128i vxi4x01234567 = _mm_unpacklo_epi8(vi4x01234567, vzero);
const __m128i vxk4x01234567 = _mm_sub_epi16(_mm_unpacklo_epi8(vk4x01234567, vzero), vk_zero_point);
const __m128i vprod4x01234567lo = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
const __m128i vprod4x01234567hi = _mm_mulhi_epi16(vxi4x01234567, vxk4x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod4x01234567lo, vprod4x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod4x01234567lo, vprod4x01234567hi));
__m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
vscaled4567 = _mm_mul_ps(vscaled4567, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vscaled0123);
vacc4567 = _mm_cvtps_epi32(vscaled4567);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (c & 4) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (c & 2) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (c & 1) {
*output = (uint8_t) _mm_cvtsi128_si32(vout0123456701234567);
output += 1;
}
}
}
}
input = (const uint8_t**) ((uintptr_t) input + input_stride);
output = (uint8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 24,363
| 46.493177
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-5f5m5l8c8s8r-minmax-fp32-sse41-mul16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-sse-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// Depthwise convolution microkernel: QU8 (unsigned 8-bit quantized) input and
// output, fp32-based requantization, SSE4.1 16-bit-multiply ("mul16") math.
//
// Multipass variant ("5f5m5l, 8c8s8r"): kernel taps are consumed in chunks —
// a first pass of 5 taps initializes the int32 accumulator buffer from the
// packed biases, zero or more middle passes of 5 taps each accumulate into
// that buffer, and a last pass of up to 5 taps finishes accumulation and emits
// quantized output. Channels are processed 8 at a time.
//
// channels      - number of channels (non-zero)
// output_width  - number of output pixels to produce (non-zero)
// input         - per-tap input row pointers; advanced by input_stride bytes
//                 after each output pixel
// weights       - packed weights: per 8-channel group, 8 int32 biases followed
//                 by the uint8 kernel taps (see the pointer offsets below)
// zero          - sentinel row for out-of-image taps; such rows are NOT
//                 shifted by input_offset
// kernel_size   - total number of taps; must exceed the 5 consumed by the
//                 first pass
// buffer        - int32 scratch accumulators, one per channel rounded up to a
//                 multiple of 8
// params        - quantization parameters (kernel zero point, fp32 scale,
//                 output zero point, output min/max)
void xnn_qu8_dwconv_minmax_fp32_ukernel_5f5m5l8c8s8r__sse41_mul16(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);
  assert(kernel_size > 5);  // the first pass alone consumes 5 taps

  // Kernel zero point broadcast across eight 16-bit lanes (subtracted from
  // each zero-extended kernel byte before multiplication).
  const __m128i vk_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
  do {
    const void* w = weights;

    // First pass to process 5 inputs.
    {
      int32_t* b = buffer;
      // Load the 5 input row pointers for this pass; rows equal to `zero` are
      // padding and are deliberately not offset.
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      input += 5;

      // Process channels rounded up to the 8-wide tile; the extra lanes are
      // covered by buffer padding and XNN_OOB_READS.
      size_t c = round_up_po2(channels, 8);
      for (; c >= 8; c -= 8) {
        // Initialize accumulators from the 8 packed int32 biases.
        __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
        __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));

        // Tap 0: zero-extend 8 input bytes and 8 kernel bytes to 16 bits,
        // subtract the kernel zero point, then form the 32-bit products from
        // the mullo/mulhi halves recombined by unpacklo/unpackhi.
        const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
        const __m128i vxi0x01234567 = _mm_cvtepu8_epi16(vi0x01234567);
        const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(uint8_t)));
        const __m128i vxk0x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk0x01234567), vk_zero_point);
        i0 += 8;

        const __m128i vprod0x01234567lo = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
        const __m128i vprod0x01234567hi = _mm_mulhi_epi16(vxi0x01234567, vxk0x01234567);
        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod0x01234567lo, vprod0x01234567hi));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod0x01234567lo, vprod0x01234567hi));

        // Taps 1-4: same multiply-accumulate pattern at successive 8-byte
        // kernel offsets.
        const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
        const __m128i vxi1x01234567 = _mm_cvtepu8_epi16(vi1x01234567);
        const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(uint8_t)));
        const __m128i vxk1x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk1x01234567), vk_zero_point);
        i1 += 8;

        const __m128i vprod1x01234567lo = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
        const __m128i vprod1x01234567hi = _mm_mulhi_epi16(vxi1x01234567, vxk1x01234567);
        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod1x01234567lo, vprod1x01234567hi));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod1x01234567lo, vprod1x01234567hi));

        const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
        const __m128i vxi2x01234567 = _mm_cvtepu8_epi16(vi2x01234567);
        const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(uint8_t)));
        const __m128i vxk2x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk2x01234567), vk_zero_point);
        i2 += 8;

        const __m128i vprod2x01234567lo = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
        const __m128i vprod2x01234567hi = _mm_mulhi_epi16(vxi2x01234567, vxk2x01234567);
        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod2x01234567lo, vprod2x01234567hi));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod2x01234567lo, vprod2x01234567hi));

        const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
        const __m128i vxi3x01234567 = _mm_cvtepu8_epi16(vi3x01234567);
        const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(uint8_t)));
        const __m128i vxk3x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk3x01234567), vk_zero_point);
        i3 += 8;

        const __m128i vprod3x01234567lo = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
        const __m128i vprod3x01234567hi = _mm_mulhi_epi16(vxi3x01234567, vxk3x01234567);
        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod3x01234567lo, vprod3x01234567hi));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod3x01234567lo, vprod3x01234567hi));

        const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
        const __m128i vxi4x01234567 = _mm_cvtepu8_epi16(vi4x01234567);
        const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(uint8_t)));
        const __m128i vxk4x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk4x01234567), vk_zero_point);
        i4 += 8;

        const __m128i vprod4x01234567lo = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
        const __m128i vprod4x01234567hi = _mm_mulhi_epi16(vxi4x01234567, vxk4x01234567);
        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod4x01234567lo, vprod4x01234567hi));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod4x01234567lo, vprod4x01234567hi));

        // Advance past this group's 8 biases + 5 taps x 8 channels of weights.
        w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(uint8_t));

        // Spill partial sums to the accumulator buffer for later passes.
        _mm_storeu_si128((__m128i*) b, vacc0123);
        _mm_storeu_si128((__m128i*) (b + 4), vacc4567);
        b += 8;
      }
      assert(c == 0);
    }

    // Middle pass to process 5 inputs in each iteration.
    for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
      int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      input += 5;

      size_t c = round_up_po2(channels, 8);
      for (; c >= 8; c -= 8) {
        // Resume accumulation from the spilled partial sums; `w` now points
        // directly at kernel taps (biases were consumed in the first pass).
        __m128i vacc0123 = _mm_loadu_si128((const __m128i*) b);
        __m128i vacc4567 = _mm_loadu_si128((const __m128i*) (b + 4));

        const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
        const __m128i vxi0x01234567 = _mm_cvtepu8_epi16(vi0x01234567);
        const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 0 * sizeof(uint8_t)));
        const __m128i vxk0x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk0x01234567), vk_zero_point);
        i0 += 8;

        const __m128i vprod0x01234567lo = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
        const __m128i vprod0x01234567hi = _mm_mulhi_epi16(vxi0x01234567, vxk0x01234567);
        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod0x01234567lo, vprod0x01234567hi));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod0x01234567lo, vprod0x01234567hi));

        const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
        const __m128i vxi1x01234567 = _mm_cvtepu8_epi16(vi1x01234567);
        const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(uint8_t)));
        const __m128i vxk1x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk1x01234567), vk_zero_point);
        i1 += 8;

        const __m128i vprod1x01234567lo = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
        const __m128i vprod1x01234567hi = _mm_mulhi_epi16(vxi1x01234567, vxk1x01234567);
        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod1x01234567lo, vprod1x01234567hi));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod1x01234567lo, vprod1x01234567hi));

        const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
        const __m128i vxi2x01234567 = _mm_cvtepu8_epi16(vi2x01234567);
        const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(uint8_t)));
        const __m128i vxk2x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk2x01234567), vk_zero_point);
        i2 += 8;

        const __m128i vprod2x01234567lo = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
        const __m128i vprod2x01234567hi = _mm_mulhi_epi16(vxi2x01234567, vxk2x01234567);
        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod2x01234567lo, vprod2x01234567hi));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod2x01234567lo, vprod2x01234567hi));

        const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
        const __m128i vxi3x01234567 = _mm_cvtepu8_epi16(vi3x01234567);
        const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(uint8_t)));
        const __m128i vxk3x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk3x01234567), vk_zero_point);
        i3 += 8;

        const __m128i vprod3x01234567lo = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
        const __m128i vprod3x01234567hi = _mm_mulhi_epi16(vxi3x01234567, vxk3x01234567);
        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod3x01234567lo, vprod3x01234567hi));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod3x01234567lo, vprod3x01234567hi));

        const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
        const __m128i vxi4x01234567 = _mm_cvtepu8_epi16(vi4x01234567);
        const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(uint8_t)));
        const __m128i vxk4x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk4x01234567), vk_zero_point);
        i4 += 8;

        const __m128i vprod4x01234567lo = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
        const __m128i vprod4x01234567hi = _mm_mulhi_epi16(vxi4x01234567, vxk4x01234567);
        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod4x01234567lo, vprod4x01234567hi));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod4x01234567lo, vprod4x01234567hi));

        w = (const void*) ((uintptr_t) w + 40 * sizeof(uint8_t));

        _mm_storeu_si128((__m128i*) (b), vacc0123);
        _mm_storeu_si128((__m128i*) (b + 4), vacc4567);
        b += 8;
      }
      assert(c == 0);
    }

    // Last pass to process up to 5 inputs.
    {
      const int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }

      // Exact channel count here: the remainder branch below finishes the
      // final partial tile.
      size_t c = channels;
      for (; c >= 8; c -= 8) {
        __m128i vacc0123 = _mm_loadu_si128((const __m128i*) b);
        __m128i vacc4567 = _mm_loadu_si128((const __m128i*) (b + 4));
        b += 8;

        const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
        const __m128i vxi0x01234567 = _mm_cvtepu8_epi16(vi0x01234567);
        const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 0 * sizeof(uint8_t)));
        const __m128i vxk0x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk0x01234567), vk_zero_point);
        i0 += 8;

        const __m128i vprod0x01234567lo = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
        const __m128i vprod0x01234567hi = _mm_mulhi_epi16(vxi0x01234567, vxk0x01234567);
        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod0x01234567lo, vprod0x01234567hi));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod0x01234567lo, vprod0x01234567hi));

        const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
        const __m128i vxi1x01234567 = _mm_cvtepu8_epi16(vi1x01234567);
        const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(uint8_t)));
        const __m128i vxk1x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk1x01234567), vk_zero_point);
        i1 += 8;

        const __m128i vprod1x01234567lo = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
        const __m128i vprod1x01234567hi = _mm_mulhi_epi16(vxi1x01234567, vxk1x01234567);
        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod1x01234567lo, vprod1x01234567hi));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod1x01234567lo, vprod1x01234567hi));

        const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
        const __m128i vxi2x01234567 = _mm_cvtepu8_epi16(vi2x01234567);
        const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(uint8_t)));
        const __m128i vxk2x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk2x01234567), vk_zero_point);
        i2 += 8;

        const __m128i vprod2x01234567lo = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
        const __m128i vprod2x01234567hi = _mm_mulhi_epi16(vxi2x01234567, vxk2x01234567);
        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod2x01234567lo, vprod2x01234567hi));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod2x01234567lo, vprod2x01234567hi));

        const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
        const __m128i vxi3x01234567 = _mm_cvtepu8_epi16(vi3x01234567);
        const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(uint8_t)));
        const __m128i vxk3x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk3x01234567), vk_zero_point);
        i3 += 8;

        const __m128i vprod3x01234567lo = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
        const __m128i vprod3x01234567hi = _mm_mulhi_epi16(vxi3x01234567, vxk3x01234567);
        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod3x01234567lo, vprod3x01234567hi));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod3x01234567lo, vprod3x01234567hi));

        const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
        const __m128i vxi4x01234567 = _mm_cvtepu8_epi16(vi4x01234567);
        const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(uint8_t)));
        const __m128i vxk4x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk4x01234567), vk_zero_point);
        i4 += 8;

        const __m128i vprod4x01234567lo = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
        const __m128i vprod4x01234567hi = _mm_mulhi_epi16(vxi4x01234567, vxk4x01234567);
        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod4x01234567lo, vprod4x01234567hi));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod4x01234567lo, vprod4x01234567hi));

        w = (const void*) ((uintptr_t) w + 40 * sizeof(uint8_t));

        // fp32 requantization: int32 -> float, scale, clamp above (max is
        // pre-biased by -zero_point), round-convert back to int32
        // (_mm_cvtps_epi32 uses the current rounding mode, round-to-nearest-
        // even by default), add the output zero point with saturation, pack
        // to uint8, and clamp below.
        __m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
        __m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);

        const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
        vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
        vscaled4567 = _mm_mul_ps(vscaled4567, vscale);

        const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
        vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
        vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);

        vacc0123 = _mm_cvtps_epi32(vscaled0123);
        vacc4567 = _mm_cvtps_epi32(vscaled4567);

        const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
        __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);

        __m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);

        const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
        vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);

        _mm_storel_epi64((__m128i*) output, vout0123456701234567);
        output += 8;
      }
      if XNN_UNLIKELY(c != 0) {
        // Final partial tile of 1-7 channels: compute a full 8-lane result
        // (buffer/weights are padded and OOB input reads are permitted), then
        // store only c bytes via the 4/2/1-byte tail below.
        {
          __m128i vacc0123 = _mm_loadu_si128((const __m128i*) b);
          __m128i vacc4567 = _mm_loadu_si128((const __m128i*) (b + 4));
          b += 8;

          const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
          const __m128i vxi0x01234567 = _mm_cvtepu8_epi16(vi0x01234567);
          const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 0 * sizeof(uint8_t)));
          const __m128i vxk0x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk0x01234567), vk_zero_point);

          const __m128i vprod0x01234567lo = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
          const __m128i vprod0x01234567hi = _mm_mulhi_epi16(vxi0x01234567, vxk0x01234567);
          vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod0x01234567lo, vprod0x01234567hi));
          vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod0x01234567lo, vprod0x01234567hi));

          const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
          const __m128i vxi1x01234567 = _mm_cvtepu8_epi16(vi1x01234567);
          const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(uint8_t)));
          const __m128i vxk1x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk1x01234567), vk_zero_point);

          const __m128i vprod1x01234567lo = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
          const __m128i vprod1x01234567hi = _mm_mulhi_epi16(vxi1x01234567, vxk1x01234567);
          vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod1x01234567lo, vprod1x01234567hi));
          vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod1x01234567lo, vprod1x01234567hi));

          const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
          const __m128i vxi2x01234567 = _mm_cvtepu8_epi16(vi2x01234567);
          const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(uint8_t)));
          const __m128i vxk2x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk2x01234567), vk_zero_point);

          const __m128i vprod2x01234567lo = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
          const __m128i vprod2x01234567hi = _mm_mulhi_epi16(vxi2x01234567, vxk2x01234567);
          vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod2x01234567lo, vprod2x01234567hi));
          vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod2x01234567lo, vprod2x01234567hi));

          const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
          const __m128i vxi3x01234567 = _mm_cvtepu8_epi16(vi3x01234567);
          const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(uint8_t)));
          const __m128i vxk3x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk3x01234567), vk_zero_point);

          const __m128i vprod3x01234567lo = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
          const __m128i vprod3x01234567hi = _mm_mulhi_epi16(vxi3x01234567, vxk3x01234567);
          vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod3x01234567lo, vprod3x01234567hi));
          vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod3x01234567lo, vprod3x01234567hi));

          const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
          const __m128i vxi4x01234567 = _mm_cvtepu8_epi16(vi4x01234567);
          const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(uint8_t)));
          const __m128i vxk4x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk4x01234567), vk_zero_point);

          const __m128i vprod4x01234567lo = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
          const __m128i vprod4x01234567hi = _mm_mulhi_epi16(vxi4x01234567, vxk4x01234567);
          vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod4x01234567lo, vprod4x01234567hi));
          vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod4x01234567lo, vprod4x01234567hi));

          // Same fp32 requantization sequence as the full-tile loop above.
          __m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
          __m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);

          const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
          vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
          vscaled4567 = _mm_mul_ps(vscaled4567, vscale);

          const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
          vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
          vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);

          vacc0123 = _mm_cvtps_epi32(vscaled0123);
          vacc4567 = _mm_cvtps_epi32(vscaled4567);

          const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
          __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);

          __m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
          vout0123456701234567 = _mm_max_epu8(vout0123456701234567, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));

          // Store the low c bytes: 4, then 2, then 1, shifting consumed lanes
          // out of the vector after each store.
          if (c & 4) {
            unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
            vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
            output += 4;
          }
          if (c & 2) {
            unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
            vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
            output += 2;
          }
          if (c & 1) {
            *output = (uint8_t) _mm_extract_epi8(vout0123456701234567, 0);
            output += 1;
          }
        }
      }
    }

    // Advance to the next output pixel's row pointers and output position.
    input = (const uint8_t**) ((uintptr_t) input + input_stride);
    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 23,880
| 45.917485
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-5f5m5l8c8s8r-minmax-rndnu-neon-mul16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-neon-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
// QU8 multipass depthwise convolution, NEON 16-bit-multiply variant.
// Name decodes as: 5 rows in the first pass (5f), 5 rows per middle pass (5m),
// up to 5 rows in the last pass (5l), 8 channels per vector iteration (8c8s8r).
// Per output pixel: the first pass seeds an int32 scratch 'buffer' with the
// per-channel bias plus 5 row contributions; each middle pass adds 5 more rows
// into 'buffer'; the last pass adds the final rows and requantizes with the
// rndnu scheme (saturating pre-shift, doubling-high multiply, rounding
// post-shift). The XNN_OOB_READS annotation covers the remainder path, which
// still issues full 8-byte vector loads — presumably declaring those
// out-of-bounds reads deliberate (TODO confirm against the macro's definition).
void xnn_qu8_dwconv_minmax_rndnu_ukernel_5f5m5l8c8s8r__neon_mul16(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);
  assert(kernel_size > 5);
  // Broadcast rndnu requantization constants into vector registers once.
  const uint8x8_t vkernel_zero_point = vld1_dup_u8(params->rndnu_neon.kernel_zero_point);
  const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
  const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
  const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
  const uint8x8_t voutput_min = vld1_dup_u8(&params->rndnu_neon.output_min);
  const uint8x8_t voutput_max = vld1_dup_u8(&params->rndnu_neon.output_max);
  do {
    const void* w = weights;
    // First pass to process 5 inputs.
    {
      int32_t* b = buffer;
      // Rows equal to the shared 'zero' page must not be offset: they point at
      // a dedicated zero buffer rather than into the caller's input tensor.
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      input += 5;
      // Channel count is rounded up to the 8-wide vector size, so remainder
      // channels are fully buffered here and only trimmed in the last pass.
      size_t c = round_up_po2(channels, 8);
      for (; c >= 8; c -= 8) {
        // Packed weights in the first pass: 8 x int32 bias, then 5 x 8 uint8 taps.
        int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
        int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
        const int16x8_t vi0x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i0))); i0 += 8;
        const int16x8_t vk0x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
        const int16x8_t vi1x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i1))); i1 += 8;
        const int16x8_t vk1x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
        const int16x8_t vi2x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i2))); i2 += 8;
        const int16x8_t vk2x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
        const int16x8_t vi3x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i3))); i3 += 8;
        const int16x8_t vk3x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
        const int16x8_t vi4x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i4))); i4 += 8;
        const int16x8_t vk4x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
        // Spill partial sums to the scratch buffer for the next pass.
        vst1q_s32(b, vacc0123); b += 4;
        vst1q_s32(b, vacc4567); b += 4;
      }
      assert(c == 0);
    }
    // Middle pass to process 5 inputs in each iteration.
    for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
      int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      input += 5;
      size_t c = round_up_po2(channels, 8);
      for (; c >= 8; c -= 8) {
        // Reload the partial sums spilled by the previous pass; no bias here,
        // the middle-pass weight stream carries only 5 x 8 uint8 taps.
        int32x4_t vacc0123 = vld1q_s32(b);
        int32x4_t vacc4567 = vld1q_s32(b + 4);
        const int16x8_t vi0x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i0))); i0 += 8;
        const int16x8_t vk0x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
        const int16x8_t vi1x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i1))); i1 += 8;
        const int16x8_t vk1x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
        const int16x8_t vi2x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i2))); i2 += 8;
        const int16x8_t vk2x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
        const int16x8_t vi3x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i3))); i3 += 8;
        const int16x8_t vk3x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
        const int16x8_t vi4x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i4))); i4 += 8;
        const int16x8_t vk4x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
        vst1q_s32(b, vacc0123); b += 4;
        vst1q_s32(b, vacc4567); b += 4;
      }
      assert(c == 0);
    }
    // Last pass to process up to 5 inputs.
    {
      const int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      // Note: 'c' is the true channel count here — the remainder (1..7
      // channels) is handled by the partial-store tail below.
      size_t c = channels;
      for (; c >= 8; c -= 8) {
        int32x4_t vacc0123 = vld1q_s32(b); b += 4;
        int32x4_t vacc4567 = vld1q_s32(b); b += 4;
        const int16x8_t vi0x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i0))); i0 += 8;
        const int16x8_t vk0x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
        const int16x8_t vi1x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i1))); i1 += 8;
        const int16x8_t vk1x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
        const int16x8_t vi2x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i2))); i2 += 8;
        const int16x8_t vk2x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
        const int16x8_t vi3x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i3))); i3 += 8;
        const int16x8_t vk3x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
        const int16x8_t vi4x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i4))); i4 += 8;
        const int16x8_t vk4x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
        // rndnu requantization: saturating pre-shift, doubling-high multiply,
        // rounding post-shift, add output zero point, saturate to uint8.
        vacc0123 = vqshlq_s32(vacc0123, vright_pre_shift);
        vacc4567 = vqshlq_s32(vacc4567, vright_pre_shift);
        vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
        vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
        vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
        vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);
#if XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
        vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
        uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
#else  // !XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
        vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
        uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
#endif  // !XNN_ARCH_ARM64
        vout01234567 = vmax_u8(vout01234567, voutput_min);
        vout01234567 = vmin_u8(vout01234567, voutput_max);
        vst1_u8(output, vout01234567); output += 8;
      }
      if XNN_UNLIKELY(c != 0) {
        {
          // Remainder of 1..7 channels: same math on a full 8-wide vector
          // (loads may overrun — see XNN_OOB_READS note above), but only the
          // first 'c' lanes are stored.
          int32x4_t vacc0123 = vld1q_s32(b); b += 4;
          int32x4_t vacc4567 = vld1q_s32(b); b += 4;
          const int16x8_t vi0x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i0)));
          const int16x8_t vk0x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const int8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
          const int16x8_t vi1x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i1)));
          const int16x8_t vk1x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const int8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
          const int16x8_t vi2x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i2)));
          const int16x8_t vk2x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const int8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
          const int16x8_t vi3x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i3)));
          const int16x8_t vk3x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const int8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
          const int16x8_t vi4x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i4)));
          const int16x8_t vk4x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const int8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
          vacc0123 = vqshlq_s32(vacc0123, vright_pre_shift);
          vacc4567 = vqshlq_s32(vacc4567, vright_pre_shift);
          vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
          vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
          vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
          vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);
#if XNN_ARCH_ARM64
          int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
          int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
          vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
          uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
          vout01234567 = vmax_u8(vout01234567, voutput_min);
          vout01234567 = vmin_u8(vout01234567, voutput_max);
          // Store 4/2/1 lanes according to the bits of c, rotating the vector
          // so the next store always reads from lane 0.
          if (c & 4) {
            vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
            vout01234567 = vext_u8(vout01234567, vout01234567, 4);
          }
          if (c & 2) {
            vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
            vout01234567 = vext_u8(vout01234567, vout01234567, 2);
          }
          if (c & 1) {
            vst1_lane_u8(output, vout01234567, 0); output += 1;
          }
        }
      }
    }
    input = (const uint8_t**) ((uintptr_t) input + input_stride);
    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 16,411
| 44.087912
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-6f6m7l1c1s1r-minmax-fp32-scalar-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QU8 multipass depthwise convolution, scalar fp32 ("magic bias") variant.
// Name decodes as: 6 rows in the first pass (6f), 6 rows per middle pass (6m),
// up to 7 rows in the last pass (7l), 1 channel at a time (1c1s1r).
// Per output pixel: the first pass seeds the int32 scratch 'buffer' with the
// per-channel bias plus 6 row contributions; each middle pass adds 6 more rows
// into 'buffer'; the last pass adds up to 7 final rows and requantizes in
// float with the magic-bias rounding trick.
void xnn_qu8_dwconv_minmax_fp32_ukernel_6f6m7l1c1s1r__scalar_fmagic(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);
  assert(kernel_size > 6);
  // Requantization constants for the fp32/fmagic path.
  const float vscale = params->fp32_scalar_fmagic.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
  const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
  const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
  const int32_t vkernel_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
  do {
    const void* w = weights;
    // First pass: bias plus the first 6 kernel rows, spilled to 'buffer'.
    {
      // Rows pointing at the shared 'zero' page must not be offset.
      const uint8_t* rows[6];
      for (size_t r = 0; r < 6; r++) {
        const uint8_t* row = input[r];
        assert(row != NULL);
        if XNN_UNPREDICTABLE(row != zero) {
          row = (const uint8_t*) ((uintptr_t) row + input_offset);
        }
        rows[r] = row;
      }
      input += 6;
      int32_t* b = buffer;
      size_t c = channels;
      do {
        // First-pass weight layout per channel: int32 bias, then 6 uint8 taps.
        int32_t vacc = unaligned_load_s32(w);
        const uint8_t* wk = (const uint8_t*) ((uintptr_t) w + sizeof(int32_t));
        for (size_t r = 0; r < 6; r++) {
          const int32_t vi = (int32_t) (uint32_t) *rows[r]++;
          const int32_t vk = (int32_t) (uint32_t) wk[r] - vkernel_zero_point;
          vacc += vi * vk;
        }
        w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 6 * sizeof(uint8_t));
        *b++ = vacc;
      } while (--c != 0);
    }
    // Middle passes: 6 more rows each, accumulated on top of 'buffer'.
    for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
      const uint8_t* rows[6];
      for (size_t r = 0; r < 6; r++) {
        const uint8_t* row = input[r];
        assert(row != NULL);
        if XNN_UNPREDICTABLE(row != zero) {
          row = (const uint8_t*) ((uintptr_t) row + input_offset);
        }
        rows[r] = row;
      }
      input += 6;
      int32_t* b = buffer;
      size_t c = channels;
      do {
        // Middle-pass weight layout per channel: 6 uint8 taps, no bias.
        int32_t vacc = *b;
        const uint8_t* wk = (const uint8_t*) w;
        for (size_t r = 0; r < 6; r++) {
          const int32_t vi = (int32_t) (uint32_t) *rows[r]++;
          const int32_t vk = (int32_t) (uint32_t) wk[r] - vkernel_zero_point;
          vacc += vi * vk;
        }
        w = (const void*) ((uintptr_t) w + 6 * sizeof(uint8_t));
        *b++ = vacc;
      } while (--c != 0);
    }
    // Last pass: up to 7 rows, then scale, clamp and round to uint8.
    {
      const uint8_t* rows[7];
      for (size_t r = 0; r < 7; r++) {
        const uint8_t* row = input[r];
        assert(row != NULL);
        if XNN_UNPREDICTABLE(row != zero) {
          row = (const uint8_t*) ((uintptr_t) row + input_offset);
        }
        rows[r] = row;
      }
      const int32_t* b = buffer;
      size_t c = channels;
      do {
        int32_t vacc = unaligned_load_s32(b++);
        const uint8_t* wk = (const uint8_t*) w;
        for (size_t r = 0; r < 7; r++) {
          const int32_t vi = (int32_t) (uint32_t) *rows[r]++;
          const int32_t vk = (int32_t) (uint32_t) wk[r] - vkernel_zero_point;
          vacc += vi * vk;
        }
        w = (const void*) ((uintptr_t) w + 7 * sizeof(uint8_t));
        // fp32 requantization: clamp in float space, then add the magic bias so
        // the low bits of the float carry the rounded integer result.
        float vfpacc = (float) vacc * vscale;
        vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
        vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
        vfpacc += vmagic_bias;
        const int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
        *output++ = (uint8_t) vout;
      } while (--c != 0);
    }
    input = (const uint8_t**) ((uintptr_t) input + input_stride);
    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 9,775
| 38.261044
| 126
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-6f6m7l1c1s1r-minmax-fp32-scalar-imagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QU8 multipass depthwise convolution, scalar fp32 ("integer magic") variant.
// Name decodes as: 6 rows in the first pass (6f), 6 rows per middle pass (6m),
// up to 7 rows in the last pass (7l), 1 channel at a time (1c1s1r).
// Identical accumulation scheme to the fmagic variant; only the final
// requantization differs — the min/max clamp is applied to the magic-biased
// integer image of the float instead of to the float itself.
void xnn_qu8_dwconv_minmax_fp32_ukernel_6f6m7l1c1s1r__scalar_imagic(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);
  assert(kernel_size > 6);
  // Requantization constants for the fp32/imagic path.
  const float vscale = params->fp32_scalar_imagic.scale;
  const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
  const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
  const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
  const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
  const int32_t vkernel_zero_point = params->fp32_scalar_imagic.kernel_zero_point;
  do {
    const void* w = weights;
    // First pass: bias plus the first 6 kernel rows, spilled to 'buffer'.
    {
      // Rows pointing at the shared 'zero' page must not be offset.
      const uint8_t* rows[6];
      for (size_t r = 0; r < 6; r++) {
        const uint8_t* row = input[r];
        assert(row != NULL);
        if XNN_UNPREDICTABLE(row != zero) {
          row = (const uint8_t*) ((uintptr_t) row + input_offset);
        }
        rows[r] = row;
      }
      input += 6;
      int32_t* b = buffer;
      size_t c = channels;
      do {
        // First-pass weight layout per channel: int32 bias, then 6 uint8 taps.
        int32_t vacc = unaligned_load_s32(w);
        const uint8_t* wk = (const uint8_t*) ((uintptr_t) w + sizeof(int32_t));
        for (size_t r = 0; r < 6; r++) {
          const int32_t vi = (int32_t) (uint32_t) *rows[r]++;
          const int32_t vk = (int32_t) (uint32_t) wk[r] - vkernel_zero_point;
          vacc += vi * vk;
        }
        w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 6 * sizeof(uint8_t));
        *b++ = vacc;
      } while (--c != 0);
    }
    // Middle passes: 6 more rows each, accumulated on top of 'buffer'.
    for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
      const uint8_t* rows[6];
      for (size_t r = 0; r < 6; r++) {
        const uint8_t* row = input[r];
        assert(row != NULL);
        if XNN_UNPREDICTABLE(row != zero) {
          row = (const uint8_t*) ((uintptr_t) row + input_offset);
        }
        rows[r] = row;
      }
      input += 6;
      int32_t* b = buffer;
      size_t c = channels;
      do {
        // Middle-pass weight layout per channel: 6 uint8 taps, no bias.
        int32_t vacc = *b;
        const uint8_t* wk = (const uint8_t*) w;
        for (size_t r = 0; r < 6; r++) {
          const int32_t vi = (int32_t) (uint32_t) *rows[r]++;
          const int32_t vk = (int32_t) (uint32_t) wk[r] - vkernel_zero_point;
          vacc += vi * vk;
        }
        w = (const void*) ((uintptr_t) w + 6 * sizeof(uint8_t));
        *b++ = vacc;
      } while (--c != 0);
    }
    // Last pass: up to 7 rows, then imagic requantization to uint8.
    {
      const uint8_t* rows[7];
      for (size_t r = 0; r < 7; r++) {
        const uint8_t* row = input[r];
        assert(row != NULL);
        if XNN_UNPREDICTABLE(row != zero) {
          row = (const uint8_t*) ((uintptr_t) row + input_offset);
        }
        rows[r] = row;
      }
      const int32_t* b = buffer;
      size_t c = channels;
      do {
        int32_t vacc = unaligned_load_s32(b++);
        const uint8_t* wk = (const uint8_t*) w;
        for (size_t r = 0; r < 7; r++) {
          const int32_t vi = (int32_t) (uint32_t) *rows[r]++;
          const int32_t vk = (int32_t) (uint32_t) wk[r] - vkernel_zero_point;
          vacc += vi * vk;
        }
        w = (const void*) ((uintptr_t) w + 7 * sizeof(uint8_t));
        // imagic requantization: add the magic bias in float, reinterpret the
        // bits as int32, clamp in integer space, then subtract the bias image.
        float vfpacc = (float) vacc * vscale;
        vfpacc += vmagic_bias;
        int32_t vout = (int32_t) float_as_uint32(vfpacc);
        vout = math_max_s32(vout, vmagic_min);
        vout = math_min_s32(vout, vmagic_max);
        vout -= vmagic_bias_less_zero_point;
        *output++ = (uint8_t) vout;
      } while (--c != 0);
    }
    input = (const uint8_t**) ((uintptr_t) input + input_stride);
    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 9,663
| 37.656
| 126
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-6f6m7l1c1s1r-minmax-fp32-scalar-lrintf.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_dwconv_minmax_fp32_ukernel_6f6m7l1c1s1r__scalar_lrintf(
size_t channels,
size_t output_width,
const uint8_t** input,
const void* weights,
uint8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const uint8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 6);
const float vscale = params->fp32_scalar_lrintf.scale;
const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
const int32_t vkernel_zero_point = params->fp32_scalar_lrintf.kernel_zero_point;
do {
const void* w = weights;
// First pass to process 6 inputs.
{
int32_t* b = buffer;
const uint8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
const uint8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = channels;
do {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) (uint32_t) *i0++;
const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[0] - vkernel_zero_point;
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) (uint32_t) *i1++;
const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[1] - vkernel_zero_point;
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) (uint32_t) *i2++;
const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[2] - vkernel_zero_point;
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) (uint32_t) *i3++;
const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[3] - vkernel_zero_point;
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) (uint32_t) *i4++;
const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[4] - vkernel_zero_point;
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) (uint32_t) *i5++;
const int32_t vk5 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[5] - vkernel_zero_point;
vacc += vi5 * vk5;
w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 6 * sizeof(uint8_t));
*b++ = vacc;
} while (--c != 0);
}
// Middle pass to process 6 inputs in each iteration.
for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
int32_t* b = buffer;
const uint8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
const uint8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = channels;
do {
int32_t vacc = *b;
const int32_t vi0 = (int32_t) (uint32_t) *i0++;
const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) (uint32_t) *i1++;
const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) (uint32_t) *i2++;
const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) (uint32_t) *i3++;
const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) (uint32_t) *i4++;
const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) (uint32_t) *i5++;
const int32_t vk5 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
vacc += vi5 * vk5;
w = (const void*) ((uintptr_t) w + 6 * sizeof(uint8_t));
*b++ = vacc;
} while (--c != 0);
}
// Last pass to process up to 7 inputs.
{
const int32_t* b = buffer;
const uint8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
const uint8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
}
const uint8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
}
size_t c = channels;
do {
int32_t vacc = unaligned_load_s32(b++);
const int32_t vi0 = (int32_t) (uint32_t) *i0++;
const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) (uint32_t) *i1++;
const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) (uint32_t) *i2++;
const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) (uint32_t) *i3++;
const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) (uint32_t) *i4++;
const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) (uint32_t) *i5++;
const int32_t vk5 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) (uint32_t) *i6++;
const int32_t vk6 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
vacc += vi6 * vk6;
w = (const void*) ((uintptr_t) w + 7 * sizeof(uint8_t));
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
const int32_t vrndacc = (int32_t) lrintf(vfpacc);
int32_t vout = vrndacc + voutput_zero_point;
*output++ = (uint8_t) vout;
} while (--c != 0);
}
input = (const uint8_t**) ((uintptr_t) input + input_stride);
output = (uint8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 9,679
| 37.875502
| 126
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-6f6m7l1c1s1r-minmax-fp32-wasm-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// Multipass QU8 depthwise-convolution microkernel (scalar code targeting
// WebAssembly): 6 taps in the first pass, 6 taps per middle-pass iteration,
// and up to 7 taps in the last pass ("6f6m7l" in the name), processing one
// channel per inner iteration ("1c1s1r").  Output requantization uses the
// FP32 "fmagic" scheme: scale in float, clamp, then round by adding a magic
// bias and reinterpreting the float's bit pattern as an integer.
//
// - input: array of input-row pointers for this output pixel; entries equal
//   to `zero` denote a shared zero-padding row and are not offset by
//   `input_offset`.
// - weights: packed per-channel data — an int32 bias followed by uint8
//   filter taps (the layout follows from the pointer arithmetic on `w`).
// - buffer: per-channel int32 accumulators carried between passes.
// - params: quantization constants (fp32_scalar_fmagic variant).
void xnn_qu8_dwconv_minmax_fp32_ukernel_6f6m7l1c1s1r__wasm_fmagic(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);
  // The first pass consumes exactly 6 taps, so more than 6 must exist.
  assert(kernel_size > 6);
  const float vscale = params->fp32_scalar_fmagic.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
  const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
  const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
  const int32_t vkernel_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
  do {
    const void* w = weights;
    // First pass to process 6 inputs.
    {
      int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      // `zero` is a shared padding row; only real rows get the offset.
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      const uint8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
      }
      input += 6;
      size_t c = channels;
      do {
        // Start from the per-channel int32 bias packed ahead of the taps.
        int32_t vacc = unaligned_load_s32(w);
        // Stored taps are zero-point-offset uint8; subtract
        // vkernel_zero_point to recover the signed weight.
        const int32_t vi0 = (int32_t) (uint32_t) *i0++;
        const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[0] - vkernel_zero_point;
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) (uint32_t) *i1++;
        const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[1] - vkernel_zero_point;
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) (uint32_t) *i2++;
        const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[2] - vkernel_zero_point;
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) (uint32_t) *i3++;
        const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[3] - vkernel_zero_point;
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) (uint32_t) *i4++;
        const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[4] - vkernel_zero_point;
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) (uint32_t) *i5++;
        const int32_t vk5 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[5] - vkernel_zero_point;
        vacc += vi5 * vk5;
        // Skip past this channel's bias + 6 taps.
        w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 6 * sizeof(uint8_t));
        // Stash the partial accumulator for the next pass.
        *b++ = vacc;
      } while (--c != 0);
    }
    // Middle pass to process 6 inputs in each iteration.
    // Loop while more than 7 taps remain; the final <= 7 taps go to the
    // last pass below.
    for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
      int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      const uint8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
      }
      input += 6;
      size_t c = channels;
      do {
        // Resume the accumulator computed by the previous pass.
        int32_t vacc = *b;
        const int32_t vi0 = (int32_t) (uint32_t) *i0++;
        const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) (uint32_t) *i1++;
        const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) (uint32_t) *i2++;
        const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) (uint32_t) *i3++;
        const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) (uint32_t) *i4++;
        const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) (uint32_t) *i5++;
        const int32_t vk5 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
        vacc += vi5 * vk5;
        // Middle-pass weights have no bias: 6 taps only.
        w = (const void*) ((uintptr_t) w + 6 * sizeof(uint8_t));
        *b++ = vacc;
      } while (--c != 0);
    }
    // Last pass to process up to 7 inputs.
    {
      const int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      const uint8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
      }
      const uint8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
      }
      size_t c = channels;
      do {
        // Resume (and consume) the per-channel accumulator.
        int32_t vacc = unaligned_load_s32(b++);
        const int32_t vi0 = (int32_t) (uint32_t) *i0++;
        const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) (uint32_t) *i1++;
        const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) (uint32_t) *i2++;
        const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) (uint32_t) *i3++;
        const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) (uint32_t) *i4++;
        const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) (uint32_t) *i5++;
        const int32_t vk5 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
        vacc += vi5 * vk5;
        const int32_t vi6 = (int32_t) (uint32_t) *i6++;
        const int32_t vk6 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
        vacc += vi6 * vk6;
        w = (const void*) ((uintptr_t) w + 7 * sizeof(uint8_t));
        // Requantize: scale in float, clamp to the (zero-point-relative)
        // output range, then round via the magic-bias trick — adding
        // vmagic_bias makes the low mantissa bits of the float equal the
        // rounded integer, so reinterpreting the bits and subtracting
        // vmagic_bias_less_output_zero_point yields the final value with
        // the output zero point already applied.
        float vfpacc = (float) vacc * vscale;
        vfpacc = __builtin_wasm_max_f32(vfpacc, voutput_min_less_zero_point);
        vfpacc = __builtin_wasm_min_f32(vfpacc, voutput_max_less_zero_point);
        vfpacc += vmagic_bias;
        int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
        *output++ = (uint8_t) vout;
      } while (--c != 0);
    }
    // Advance to the next output pixel's pointer group and output slot.
    input = (const uint8_t**) ((uintptr_t) input + input_stride);
    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 9,793
| 38.333333
| 126
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-6f6m7l2c1s1r-minmax-fp32-scalar-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// Multipass QU8 depthwise-convolution microkernel (portable scalar code):
// 6 taps in the first pass, 6 taps per middle-pass iteration, up to 7 taps
// in the last pass ("6f6m7l"), processing 2 channels per inner iteration
// with a 1-channel remainder path ("2c1s1r").  Output requantization uses
// the FP32 "fmagic" scheme: scale in float, clamp, then round by adding a
// magic bias and reinterpreting the float's bit pattern as an integer.
//
// - input: array of input-row pointers; entries equal to `zero` denote a
//   shared zero-padding row and are not offset by `input_offset`.
// - weights: packed per-channel-group data — int32 biases followed by uint8
//   filter taps, interleaved per 2-channel group (layout follows from the
//   pointer arithmetic on `w`).
// - buffer: per-channel int32 accumulators carried between passes.
// - params: quantization constants (fp32_scalar_fmagic variant).
void xnn_qu8_dwconv_minmax_fp32_ukernel_6f6m7l2c1s1r__scalar_fmagic(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);
  // The first pass consumes exactly 6 taps, so more than 6 must exist.
  assert(kernel_size > 6);
  const float vscale = params->fp32_scalar_fmagic.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
  const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
  const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
  const int32_t vkernel_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
  do {
    const void* w = weights;
    // First pass to process 6 inputs.
    {
      int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      // `zero` is a shared padding row; only real rows get the offset.
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      const uint8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
      }
      input += 6;
      size_t c = channels;
      // Main loop: two channels per iteration; weights hold two int32
      // biases followed by tap pairs interleaved as [tap][channel].
      for (; c >= 2; c -= 2) {
        int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
        int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
        const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
        const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
        i0 += 2;
        // Stored taps are zero-point-offset uint8; subtract
        // vkernel_zero_point to recover the signed weight.
        const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0] - vkernel_zero_point;
        const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1] - vkernel_zero_point;
        vacc0 += vi0x0 * vk0x0;
        vacc1 += vi0x1 * vk0x1;
        const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
        const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
        i1 += 2;
        const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2] - vkernel_zero_point;
        const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3] - vkernel_zero_point;
        vacc0 += vi1x0 * vk1x0;
        vacc1 += vi1x1 * vk1x1;
        const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
        const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
        i2 += 2;
        const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4] - vkernel_zero_point;
        const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5] - vkernel_zero_point;
        vacc0 += vi2x0 * vk2x0;
        vacc1 += vi2x1 * vk2x1;
        const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
        const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
        i3 += 2;
        const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6] - vkernel_zero_point;
        const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7] - vkernel_zero_point;
        vacc0 += vi3x0 * vk3x0;
        vacc1 += vi3x1 * vk3x1;
        const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
        const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
        i4 += 2;
        const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8] - vkernel_zero_point;
        const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9] - vkernel_zero_point;
        vacc0 += vi4x0 * vk4x0;
        vacc1 += vi4x1 * vk4x1;
        const int32_t vi5x0 = (int32_t) (uint32_t) i5[0];
        const int32_t vi5x1 = (int32_t) (uint32_t) i5[1];
        i5 += 2;
        const int32_t vk5x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10] - vkernel_zero_point;
        const int32_t vk5x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[11] - vkernel_zero_point;
        vacc0 += vi5x0 * vk5x0;
        vacc1 += vi5x1 * vk5x1;
        // Skip past this 2-channel group's biases + 12 taps.
        w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 12 * sizeof(uint8_t));
        // Stash the partial accumulators for the next pass.
        b[0] = vacc0;
        b[1] = vacc1;
        b += 2;
      }
      // Remainder path: final odd channel (input pointers are not advanced
      // — this is the last channel consumed by this pass).
      if XNN_UNLIKELY(c != 0) {
        int32_t vacc = unaligned_load_s32(w);
        const int32_t vi0 = (int32_t) (uint32_t) *i0;
        const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0] - vkernel_zero_point;
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) (uint32_t) *i1;
        const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1] - vkernel_zero_point;
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) (uint32_t) *i2;
        const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2] - vkernel_zero_point;
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) (uint32_t) *i3;
        const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3] - vkernel_zero_point;
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) (uint32_t) *i4;
        const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4] - vkernel_zero_point;
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) (uint32_t) *i5;
        const int32_t vk5 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[5] - vkernel_zero_point;
        vacc += vi5 * vk5;
        w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 6 * sizeof(uint8_t));
        *b++ = vacc;
      }
    }
    // Middle pass to process 6 inputs in each iteration.
    // Loop while more than 7 taps remain; the final <= 7 taps go to the
    // last pass below.
    for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
      int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      const uint8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
      }
      input += 6;
      size_t c = channels;
      for (; c >= 2; c -= 2) {
        // Resume the accumulators computed by the previous pass.
        int32_t vacc0 = b[0];
        int32_t vacc1 = b[1];
        const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
        const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
        i0 += 2;
        const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
        const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
        vacc0 += vi0x0 * vk0x0;
        vacc1 += vi0x1 * vk0x1;
        const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
        const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
        i1 += 2;
        const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
        const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
        vacc0 += vi1x0 * vk1x0;
        vacc1 += vi1x1 * vk1x1;
        const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
        const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
        i2 += 2;
        const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
        const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
        vacc0 += vi2x0 * vk2x0;
        vacc1 += vi2x1 * vk2x1;
        const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
        const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
        i3 += 2;
        const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
        const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[7] - vkernel_zero_point;
        vacc0 += vi3x0 * vk3x0;
        vacc1 += vi3x1 * vk3x1;
        const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
        const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
        i4 += 2;
        const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[8] - vkernel_zero_point;
        const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[9] - vkernel_zero_point;
        vacc0 += vi4x0 * vk4x0;
        vacc1 += vi4x1 * vk4x1;
        const int32_t vi5x0 = (int32_t) (uint32_t) i5[0];
        const int32_t vi5x1 = (int32_t) (uint32_t) i5[1];
        i5 += 2;
        const int32_t vk5x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[10] - vkernel_zero_point;
        const int32_t vk5x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[11] - vkernel_zero_point;
        vacc0 += vi5x0 * vk5x0;
        vacc1 += vi5x1 * vk5x1;
        // Middle-pass weights have no biases: 12 taps per 2-channel group.
        w = (const void*) ((uintptr_t) w + 12 * sizeof(uint8_t));
        b[0] = vacc0;
        b[1] = vacc1;
        b += 2;
      }
      // Remainder path: final odd channel.
      if XNN_UNLIKELY(c != 0) {
        int32_t vacc = b[0];
        const int32_t vi0 = (int32_t) (uint32_t) *i0;
        const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) (uint32_t) *i1;
        const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) (uint32_t) *i2;
        const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) (uint32_t) *i3;
        const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) (uint32_t) *i4;
        const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) (uint32_t) *i5;
        const int32_t vk5 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
        vacc += vi5 * vk5;
        w = (const void*) ((uintptr_t) w + 6 * sizeof(uint8_t));
        *b++ = vacc;
      }
    }
    // Last pass to process up to 7 inputs.
    {
      const int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      const uint8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
      }
      const uint8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
      }
      size_t c = channels;
      for (; c >= 2; c -= 2) {
        // Resume (and consume) the per-channel accumulators.
        int32_t vacc0 = b[0];
        int32_t vacc1 = b[1];
        b += 2;
        const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
        const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
        i0 += 2;
        const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
        const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
        vacc0 += vi0x0 * vk0x0;
        vacc1 += vi0x1 * vk0x1;
        const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
        const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
        i1 += 2;
        const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
        const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
        vacc0 += vi1x0 * vk1x0;
        vacc1 += vi1x1 * vk1x1;
        const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
        const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
        i2 += 2;
        const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
        const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
        vacc0 += vi2x0 * vk2x0;
        vacc1 += vi2x1 * vk2x1;
        const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
        const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
        i3 += 2;
        const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
        const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[7] - vkernel_zero_point;
        vacc0 += vi3x0 * vk3x0;
        vacc1 += vi3x1 * vk3x1;
        const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
        const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
        i4 += 2;
        const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[8] - vkernel_zero_point;
        const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[9] - vkernel_zero_point;
        vacc0 += vi4x0 * vk4x0;
        vacc1 += vi4x1 * vk4x1;
        const int32_t vi5x0 = (int32_t) (uint32_t) i5[0];
        const int32_t vi5x1 = (int32_t) (uint32_t) i5[1];
        i5 += 2;
        const int32_t vk5x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[10] - vkernel_zero_point;
        const int32_t vk5x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[11] - vkernel_zero_point;
        vacc0 += vi5x0 * vk5x0;
        vacc1 += vi5x1 * vk5x1;
        const int32_t vi6x0 = (int32_t) (uint32_t) i6[0];
        const int32_t vi6x1 = (int32_t) (uint32_t) i6[1];
        i6 += 2;
        const int32_t vk6x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[12] - vkernel_zero_point;
        const int32_t vk6x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[13] - vkernel_zero_point;
        vacc0 += vi6x0 * vk6x0;
        vacc1 += vi6x1 * vk6x1;
        w = (const void*) ((uintptr_t) w + 14 * sizeof(uint8_t));
        // Requantize both channels: scale in float, clamp to the
        // (zero-point-relative) output range, then round via the magic-bias
        // trick — adding vmagic_bias makes the low mantissa bits of the
        // float equal the rounded integer, so reinterpreting the bits and
        // subtracting vmagic_bias_less_output_zero_point yields the final
        // value with the output zero point already applied.
        float vfpacc0 = (float) vacc0;
        float vfpacc1 = (float) vacc1;
        vfpacc0 *= vscale;
        vfpacc1 *= vscale;
        vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
        vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
        vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
        vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
        vfpacc0 += vmagic_bias;
        vfpacc1 += vmagic_bias;
        int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
        int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
        output[0] = (uint8_t) vout0;
        output[1] = (uint8_t) vout1;
        output += 2;
      }
      // Remainder path: final odd channel (pointers need not advance).
      if XNN_UNLIKELY(c != 0) {
        int32_t vacc = b[0];
        const int32_t vi0 = (int32_t) (uint32_t) *i0;
        const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) (uint32_t) *i1;
        const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) (uint32_t) *i2;
        const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) (uint32_t) *i3;
        const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) (uint32_t) *i4;
        const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) (uint32_t) *i5;
        const int32_t vk5 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
        vacc += vi5 * vk5;
        const int32_t vi6 = (int32_t) (uint32_t) *i6;
        const int32_t vk6 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
        vacc += vi6 * vk6;
        // Same fmagic requantization as the unrolled path above.
        float vfpacc = (float) vacc * vscale;
        vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
        vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
        vfpacc += vmagic_bias;
        int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
        *output++ = (uint8_t) vout;
      }
    }
    // Advance to the next output pixel's pointer group and output slot.
    input = (const uint8_t**) ((uintptr_t) input + input_stride);
    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 19,103
| 38.308642
| 133
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-6f6m7l2c1s1r-minmax-fp32-scalar-imagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QU8 multipass depthwise convolution microkernel (scalar, fp32 requantization
// via the "imagic" integer magic-bias trick).
//
// The kernel taps are consumed in passes over an int32 accumulator buffer:
//   - a first pass of 6 taps that seeds the buffer from the per-channel bias,
//   - zero or more middle passes of 6 taps each that accumulate into the buffer,
//   - a last pass of up to 7 taps that accumulates, requantizes, and writes the
//     uint8 output.
// Channels are processed 2 at a time with a 1-channel remainder; one output
// pixel (all channels) is produced per iteration of the outer do/while loop.
void xnn_qu8_dwconv_minmax_fp32_ukernel_6f6m7l2c1s1r__scalar_imagic(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);
  // This multipass kernel requires more taps than the 6 handled by the first
  // pass alone.
  assert(kernel_size > 6);
  // Requantization parameters for the imagic scheme: after scaling to float,
  // adding vmagic_bias places the rounded integer result in the low bits of
  // the float representation; clamping and zero-point adjustment then happen
  // in the integer domain.
  const float vscale = params->fp32_scalar_imagic.scale;
  const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
  const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
  const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
  const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
  const int32_t vkernel_zero_point = params->fp32_scalar_imagic.kernel_zero_point;
  do {
    const void* w = weights;
    // First pass to process 6 inputs.
    {
      int32_t* b = buffer;
      // i0-i5 point at the 6 input rows for this pass. A row equal to `zero`
      // is the shared zero-padding row and must not be offset.
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      const uint8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
      }
      input += 6;
      size_t c = channels;
      // Main channel loop: 2 channels per iteration. First-pass weight layout
      // per 2 channels: 2 int32 biases followed by 6 taps x 2 channels of
      // uint8 kernel values (kernel values are offset by vkernel_zero_point).
      for (; c >= 2; c -= 2) {
        int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
        int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
        const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
        const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
        i0 += 2;
        const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0] - vkernel_zero_point;
        const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1] - vkernel_zero_point;
        vacc0 += vi0x0 * vk0x0;
        vacc1 += vi0x1 * vk0x1;
        const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
        const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
        i1 += 2;
        const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2] - vkernel_zero_point;
        const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3] - vkernel_zero_point;
        vacc0 += vi1x0 * vk1x0;
        vacc1 += vi1x1 * vk1x1;
        const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
        const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
        i2 += 2;
        const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4] - vkernel_zero_point;
        const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5] - vkernel_zero_point;
        vacc0 += vi2x0 * vk2x0;
        vacc1 += vi2x1 * vk2x1;
        const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
        const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
        i3 += 2;
        const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6] - vkernel_zero_point;
        const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7] - vkernel_zero_point;
        vacc0 += vi3x0 * vk3x0;
        vacc1 += vi3x1 * vk3x1;
        const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
        const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
        i4 += 2;
        const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8] - vkernel_zero_point;
        const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9] - vkernel_zero_point;
        vacc0 += vi4x0 * vk4x0;
        vacc1 += vi4x1 * vk4x1;
        const int32_t vi5x0 = (int32_t) (uint32_t) i5[0];
        const int32_t vi5x1 = (int32_t) (uint32_t) i5[1];
        i5 += 2;
        const int32_t vk5x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10] - vkernel_zero_point;
        const int32_t vk5x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[11] - vkernel_zero_point;
        vacc0 += vi5x0 * vk5x0;
        vacc1 += vi5x1 * vk5x1;
        // Advance past the 2 biases and 12 kernel bytes consumed above.
        w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 12 * sizeof(uint8_t));
        b[0] = vacc0;
        b[1] = vacc1;
        b += 2;
      }
      // Remainder: 1 channel (1 int32 bias + 6 uint8 taps).
      if XNN_UNLIKELY(c != 0) {
        int32_t vacc = unaligned_load_s32(w);
        const int32_t vi0 = (int32_t) (uint32_t) *i0;
        const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0] - vkernel_zero_point;
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) (uint32_t) *i1;
        const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1] - vkernel_zero_point;
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) (uint32_t) *i2;
        const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2] - vkernel_zero_point;
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) (uint32_t) *i3;
        const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3] - vkernel_zero_point;
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) (uint32_t) *i4;
        const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4] - vkernel_zero_point;
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) (uint32_t) *i5;
        const int32_t vk5 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[5] - vkernel_zero_point;
        vacc += vi5 * vk5;
        w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 6 * sizeof(uint8_t));
        *b++ = vacc;
      }
    }
    // Middle pass to process 6 inputs in each iteration.
    // Runs while more than 7 taps remain, so the last pass handles 2..7 taps.
    // Middle-pass weights carry no bias: just uint8 kernel values.
    for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
      int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      const uint8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
      }
      input += 6;
      size_t c = channels;
      for (; c >= 2; c -= 2) {
        // Accumulate on top of the partial sums from the previous pass.
        int32_t vacc0 = b[0];
        int32_t vacc1 = b[1];
        const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
        const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
        i0 += 2;
        const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
        const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
        vacc0 += vi0x0 * vk0x0;
        vacc1 += vi0x1 * vk0x1;
        const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
        const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
        i1 += 2;
        const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
        const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
        vacc0 += vi1x0 * vk1x0;
        vacc1 += vi1x1 * vk1x1;
        const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
        const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
        i2 += 2;
        const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
        const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
        vacc0 += vi2x0 * vk2x0;
        vacc1 += vi2x1 * vk2x1;
        const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
        const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
        i3 += 2;
        const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
        const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[7] - vkernel_zero_point;
        vacc0 += vi3x0 * vk3x0;
        vacc1 += vi3x1 * vk3x1;
        const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
        const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
        i4 += 2;
        const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[8] - vkernel_zero_point;
        const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[9] - vkernel_zero_point;
        vacc0 += vi4x0 * vk4x0;
        vacc1 += vi4x1 * vk4x1;
        const int32_t vi5x0 = (int32_t) (uint32_t) i5[0];
        const int32_t vi5x1 = (int32_t) (uint32_t) i5[1];
        i5 += 2;
        const int32_t vk5x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[10] - vkernel_zero_point;
        const int32_t vk5x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[11] - vkernel_zero_point;
        vacc0 += vi5x0 * vk5x0;
        vacc1 += vi5x1 * vk5x1;
        w = (const void*) ((uintptr_t) w + 12 * sizeof(uint8_t));
        b[0] = vacc0;
        b[1] = vacc1;
        b += 2;
      }
      if XNN_UNLIKELY(c != 0) {
        int32_t vacc = b[0];
        const int32_t vi0 = (int32_t) (uint32_t) *i0;
        const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) (uint32_t) *i1;
        const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) (uint32_t) *i2;
        const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) (uint32_t) *i3;
        const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) (uint32_t) *i4;
        const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) (uint32_t) *i5;
        const int32_t vk5 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
        vacc += vi5 * vk5;
        w = (const void*) ((uintptr_t) w + 6 * sizeof(uint8_t));
        *b++ = vacc;
      }
    }
    // Last pass to process up to 7 inputs.
    {
      const int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      const uint8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
      }
      const uint8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
      }
      size_t c = channels;
      for (; c >= 2; c -= 2) {
        int32_t vacc0 = b[0];
        int32_t vacc1 = b[1];
        b += 2;
        const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
        const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
        i0 += 2;
        const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
        const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
        vacc0 += vi0x0 * vk0x0;
        vacc1 += vi0x1 * vk0x1;
        const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
        const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
        i1 += 2;
        const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
        const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
        vacc0 += vi1x0 * vk1x0;
        vacc1 += vi1x1 * vk1x1;
        const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
        const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
        i2 += 2;
        const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
        const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
        vacc0 += vi2x0 * vk2x0;
        vacc1 += vi2x1 * vk2x1;
        const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
        const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
        i3 += 2;
        const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
        const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[7] - vkernel_zero_point;
        vacc0 += vi3x0 * vk3x0;
        vacc1 += vi3x1 * vk3x1;
        const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
        const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
        i4 += 2;
        const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[8] - vkernel_zero_point;
        const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[9] - vkernel_zero_point;
        vacc0 += vi4x0 * vk4x0;
        vacc1 += vi4x1 * vk4x1;
        const int32_t vi5x0 = (int32_t) (uint32_t) i5[0];
        const int32_t vi5x1 = (int32_t) (uint32_t) i5[1];
        i5 += 2;
        const int32_t vk5x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[10] - vkernel_zero_point;
        const int32_t vk5x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[11] - vkernel_zero_point;
        vacc0 += vi5x0 * vk5x0;
        vacc1 += vi5x1 * vk5x1;
        const int32_t vi6x0 = (int32_t) (uint32_t) i6[0];
        const int32_t vi6x1 = (int32_t) (uint32_t) i6[1];
        i6 += 2;
        const int32_t vk6x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[12] - vkernel_zero_point;
        const int32_t vk6x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[13] - vkernel_zero_point;
        vacc0 += vi6x0 * vk6x0;
        vacc1 += vi6x1 * vk6x1;
        w = (const void*) ((uintptr_t) w + 14 * sizeof(uint8_t));
        // imagic requantization: scale the int32 accumulator to float, add the
        // magic bias so the rounded integer lands in the low mantissa bits,
        // reinterpret the float bits as int32, clamp against the magic-biased
        // output bounds, then subtract (magic bias - output zero point).
        float vfpacc0 = (float) vacc0;
        float vfpacc1 = (float) vacc1;
        vfpacc0 *= vscale;
        vfpacc1 *= vscale;
        vfpacc0 += vmagic_bias;
        vfpacc1 += vmagic_bias;
        int32_t vout0 = (int32_t) float_as_uint32(vfpacc0);
        int32_t vout1 = (int32_t) float_as_uint32(vfpacc1);
        vout0 = math_max_s32(vout0, vmagic_min);
        vout1 = math_max_s32(vout1, vmagic_min);
        vout0 = math_min_s32(vout0, vmagic_max);
        vout1 = math_min_s32(vout1, vmagic_max);
        vout0 -= vmagic_bias_less_zero_point;
        vout1 -= vmagic_bias_less_zero_point;
        output[0] = (uint8_t) vout0;
        output[1] = (uint8_t) vout1;
        output += 2;
      }
      if XNN_UNLIKELY(c != 0) {
        int32_t vacc = b[0];
        const int32_t vi0 = (int32_t) (uint32_t) *i0;
        const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) (uint32_t) *i1;
        const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) (uint32_t) *i2;
        const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) (uint32_t) *i3;
        const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) (uint32_t) *i4;
        const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) (uint32_t) *i5;
        const int32_t vk5 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
        vacc += vi5 * vk5;
        const int32_t vi6 = (int32_t) (uint32_t) *i6;
        const int32_t vk6 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
        vacc += vi6 * vk6;
        // Same imagic requantization as the main loop, for the odd channel.
        float vfpacc = (float) vacc * vscale;
        vfpacc += vmagic_bias;
        int32_t vout = (int32_t) float_as_uint32(vfpacc);
        vout = math_max_s32(vout, vmagic_min);
        vout = math_min_s32(vout, vmagic_max);
        vout -= vmagic_bias_less_zero_point;
        *output++ = (uint8_t) vout;
      }
    }
    // Advance to the next output pixel's row pointers and output position.
    input = (const uint8_t**) ((uintptr_t) input + input_stride);
    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 18,926
| 37.626531
| 133
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-6f6m7l2c1s1r-minmax-fp32-scalar-lrintf.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QU8 multipass depthwise convolution microkernel (scalar, fp32 requantization
// using lrintf for round-to-nearest).
//
// Same multipass structure as the imagic variant: a first pass of 6 taps seeds
// the int32 accumulator buffer from the per-channel bias, middle passes of
// 6 taps each accumulate into the buffer, and a last pass of up to 7 taps
// accumulates, requantizes, and writes the uint8 output. Channels are
// processed 2 at a time with a 1-channel remainder; one output pixel per
// iteration of the outer do/while loop.
void xnn_qu8_dwconv_minmax_fp32_ukernel_6f6m7l2c1s1r__scalar_lrintf(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);
  // This multipass kernel requires more taps than the 6 handled by the first
  // pass alone.
  assert(kernel_size > 6);
  // Requantization parameters: clamping happens in the float domain against
  // zero-point-adjusted bounds, then lrintf rounds and the zero point is added.
  const float vscale = params->fp32_scalar_lrintf.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
  const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
  const int32_t vkernel_zero_point = params->fp32_scalar_lrintf.kernel_zero_point;
  do {
    const void* w = weights;
    // First pass to process 6 inputs.
    {
      int32_t* b = buffer;
      // i0-i5 point at the 6 input rows for this pass. A row equal to `zero`
      // is the shared zero-padding row and must not be offset.
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      const uint8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
      }
      input += 6;
      size_t c = channels;
      // Main channel loop: 2 channels per iteration. First-pass weight layout
      // per 2 channels: 2 int32 biases followed by 6 taps x 2 channels of
      // uint8 kernel values (kernel values are offset by vkernel_zero_point).
      for (; c >= 2; c -= 2) {
        int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
        int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
        const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
        const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
        i0 += 2;
        const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0] - vkernel_zero_point;
        const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1] - vkernel_zero_point;
        vacc0 += vi0x0 * vk0x0;
        vacc1 += vi0x1 * vk0x1;
        const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
        const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
        i1 += 2;
        const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2] - vkernel_zero_point;
        const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3] - vkernel_zero_point;
        vacc0 += vi1x0 * vk1x0;
        vacc1 += vi1x1 * vk1x1;
        const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
        const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
        i2 += 2;
        const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4] - vkernel_zero_point;
        const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5] - vkernel_zero_point;
        vacc0 += vi2x0 * vk2x0;
        vacc1 += vi2x1 * vk2x1;
        const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
        const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
        i3 += 2;
        const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6] - vkernel_zero_point;
        const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7] - vkernel_zero_point;
        vacc0 += vi3x0 * vk3x0;
        vacc1 += vi3x1 * vk3x1;
        const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
        const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
        i4 += 2;
        const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8] - vkernel_zero_point;
        const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9] - vkernel_zero_point;
        vacc0 += vi4x0 * vk4x0;
        vacc1 += vi4x1 * vk4x1;
        const int32_t vi5x0 = (int32_t) (uint32_t) i5[0];
        const int32_t vi5x1 = (int32_t) (uint32_t) i5[1];
        i5 += 2;
        const int32_t vk5x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10] - vkernel_zero_point;
        const int32_t vk5x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[11] - vkernel_zero_point;
        vacc0 += vi5x0 * vk5x0;
        vacc1 += vi5x1 * vk5x1;
        // Advance past the 2 biases and 12 kernel bytes consumed above.
        w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 12 * sizeof(uint8_t));
        b[0] = vacc0;
        b[1] = vacc1;
        b += 2;
      }
      // Remainder: 1 channel (1 int32 bias + 6 uint8 taps).
      if XNN_UNLIKELY(c != 0) {
        int32_t vacc = unaligned_load_s32(w);
        const int32_t vi0 = (int32_t) (uint32_t) *i0;
        const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0] - vkernel_zero_point;
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) (uint32_t) *i1;
        const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1] - vkernel_zero_point;
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) (uint32_t) *i2;
        const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2] - vkernel_zero_point;
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) (uint32_t) *i3;
        const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3] - vkernel_zero_point;
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) (uint32_t) *i4;
        const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4] - vkernel_zero_point;
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) (uint32_t) *i5;
        const int32_t vk5 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[5] - vkernel_zero_point;
        vacc += vi5 * vk5;
        w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 6 * sizeof(uint8_t));
        *b++ = vacc;
      }
    }
    // Middle pass to process 6 inputs in each iteration.
    // Runs while more than 7 taps remain, so the last pass handles 2..7 taps.
    // Middle-pass weights carry no bias: just uint8 kernel values.
    for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
      int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      const uint8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
      }
      input += 6;
      size_t c = channels;
      for (; c >= 2; c -= 2) {
        // Accumulate on top of the partial sums from the previous pass.
        int32_t vacc0 = b[0];
        int32_t vacc1 = b[1];
        const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
        const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
        i0 += 2;
        const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
        const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
        vacc0 += vi0x0 * vk0x0;
        vacc1 += vi0x1 * vk0x1;
        const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
        const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
        i1 += 2;
        const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
        const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
        vacc0 += vi1x0 * vk1x0;
        vacc1 += vi1x1 * vk1x1;
        const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
        const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
        i2 += 2;
        const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
        const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
        vacc0 += vi2x0 * vk2x0;
        vacc1 += vi2x1 * vk2x1;
        const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
        const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
        i3 += 2;
        const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
        const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[7] - vkernel_zero_point;
        vacc0 += vi3x0 * vk3x0;
        vacc1 += vi3x1 * vk3x1;
        const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
        const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
        i4 += 2;
        const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[8] - vkernel_zero_point;
        const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[9] - vkernel_zero_point;
        vacc0 += vi4x0 * vk4x0;
        vacc1 += vi4x1 * vk4x1;
        const int32_t vi5x0 = (int32_t) (uint32_t) i5[0];
        const int32_t vi5x1 = (int32_t) (uint32_t) i5[1];
        i5 += 2;
        const int32_t vk5x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[10] - vkernel_zero_point;
        const int32_t vk5x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[11] - vkernel_zero_point;
        vacc0 += vi5x0 * vk5x0;
        vacc1 += vi5x1 * vk5x1;
        w = (const void*) ((uintptr_t) w + 12 * sizeof(uint8_t));
        b[0] = vacc0;
        b[1] = vacc1;
        b += 2;
      }
      if XNN_UNLIKELY(c != 0) {
        int32_t vacc = b[0];
        const int32_t vi0 = (int32_t) (uint32_t) *i0;
        const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) (uint32_t) *i1;
        const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) (uint32_t) *i2;
        const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) (uint32_t) *i3;
        const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) (uint32_t) *i4;
        const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) (uint32_t) *i5;
        const int32_t vk5 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
        vacc += vi5 * vk5;
        w = (const void*) ((uintptr_t) w + 6 * sizeof(uint8_t));
        *b++ = vacc;
      }
    }
    // Last pass to process up to 7 inputs.
    {
      const int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      const uint8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
      }
      const uint8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
      }
      size_t c = channels;
      for (; c >= 2; c -= 2) {
        int32_t vacc0 = b[0];
        int32_t vacc1 = b[1];
        b += 2;
        const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
        const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
        i0 += 2;
        const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
        const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
        vacc0 += vi0x0 * vk0x0;
        vacc1 += vi0x1 * vk0x1;
        const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
        const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
        i1 += 2;
        const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
        const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
        vacc0 += vi1x0 * vk1x0;
        vacc1 += vi1x1 * vk1x1;
        const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
        const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
        i2 += 2;
        const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
        const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
        vacc0 += vi2x0 * vk2x0;
        vacc1 += vi2x1 * vk2x1;
        const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
        const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
        i3 += 2;
        const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
        const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[7] - vkernel_zero_point;
        vacc0 += vi3x0 * vk3x0;
        vacc1 += vi3x1 * vk3x1;
        const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
        const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
        i4 += 2;
        const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[8] - vkernel_zero_point;
        const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[9] - vkernel_zero_point;
        vacc0 += vi4x0 * vk4x0;
        vacc1 += vi4x1 * vk4x1;
        const int32_t vi5x0 = (int32_t) (uint32_t) i5[0];
        const int32_t vi5x1 = (int32_t) (uint32_t) i5[1];
        i5 += 2;
        const int32_t vk5x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[10] - vkernel_zero_point;
        const int32_t vk5x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[11] - vkernel_zero_point;
        vacc0 += vi5x0 * vk5x0;
        vacc1 += vi5x1 * vk5x1;
        const int32_t vi6x0 = (int32_t) (uint32_t) i6[0];
        const int32_t vi6x1 = (int32_t) (uint32_t) i6[1];
        i6 += 2;
        const int32_t vk6x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[12] - vkernel_zero_point;
        const int32_t vk6x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[13] - vkernel_zero_point;
        vacc0 += vi6x0 * vk6x0;
        vacc1 += vi6x1 * vk6x1;
        w = (const void*) ((uintptr_t) w + 14 * sizeof(uint8_t));
        // fp32 requantization: scale the int32 accumulator, clamp in the float
        // domain to the zero-point-adjusted output bounds, round to nearest
        // with lrintf, then add the output zero point.
        float vfpacc0 = (float) vacc0;
        float vfpacc1 = (float) vacc1;
        vfpacc0 *= vscale;
        vfpacc1 *= vscale;
        vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
        vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
        vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
        vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
        const int32_t vrndacc0 = (int32_t) lrintf(vfpacc0);
        const int32_t vrndacc1 = (int32_t) lrintf(vfpacc1);
        int32_t vout0 = (int32_t) vrndacc0 + voutput_zero_point;
        int32_t vout1 = (int32_t) vrndacc1 + voutput_zero_point;
        output[0] = (uint8_t) vout0;
        output[1] = (uint8_t) vout1;
        output += 2;
      }
      if XNN_UNLIKELY(c != 0) {
        int32_t vacc = b[0];
        const int32_t vi0 = (int32_t) (uint32_t) *i0;
        const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) (uint32_t) *i1;
        const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) (uint32_t) *i2;
        const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) (uint32_t) *i3;
        const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) (uint32_t) *i4;
        const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) (uint32_t) *i5;
        const int32_t vk5 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
        vacc += vi5 * vk5;
        const int32_t vi6 = (int32_t) (uint32_t) *i6;
        const int32_t vk6 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
        vacc += vi6 * vk6;
        // Same clamp-then-lrintf requantization, for the odd channel.
        float vfpacc = (float) vacc * vscale;
        vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
        vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
        const int32_t vrndacc = (int32_t) lrintf(vfpacc);
        int32_t vout = vrndacc + voutput_zero_point;
        *output++ = (uint8_t) vout;
      }
    }
    // Advance to the next output pixel's row pointers and output position.
    input = (const uint8_t**) ((uintptr_t) input + input_stride);
    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 18,999
| 38.09465
| 133
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-6f6m7l2c1s1r-minmax-fp32-wasm-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_dwconv_minmax_fp32_ukernel_6f6m7l2c1s1r__wasm_fmagic(
size_t channels,
size_t output_width,
const uint8_t** input,
const void* weights,
uint8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const uint8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 6);
const float vscale = params->fp32_scalar_fmagic.scale;
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
const int32_t vkernel_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
do {
const void* w = weights;
// First pass to process 6 inputs.
{
int32_t* b = buffer;
const uint8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
const uint8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0] - vkernel_zero_point;
const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1] - vkernel_zero_point;
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2] - vkernel_zero_point;
const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3] - vkernel_zero_point;
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4] - vkernel_zero_point;
const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5] - vkernel_zero_point;
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6] - vkernel_zero_point;
const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7] - vkernel_zero_point;
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8] - vkernel_zero_point;
const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9] - vkernel_zero_point;
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) (uint32_t) i5[0];
const int32_t vi5x1 = (int32_t) (uint32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10] - vkernel_zero_point;
const int32_t vk5x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[11] - vkernel_zero_point;
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 12 * sizeof(uint8_t));
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) (uint32_t) *i0;
const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0] - vkernel_zero_point;
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) (uint32_t) *i1;
const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1] - vkernel_zero_point;
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) (uint32_t) *i2;
const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2] - vkernel_zero_point;
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) (uint32_t) *i3;
const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3] - vkernel_zero_point;
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) (uint32_t) *i4;
const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4] - vkernel_zero_point;
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) (uint32_t) *i5;
const int32_t vk5 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[5] - vkernel_zero_point;
vacc += vi5 * vk5;
w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 6 * sizeof(uint8_t));
*b++ = vacc;
}
}
// Middle pass to process 6 inputs in each iteration.
for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
int32_t* b = buffer;
const uint8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
const uint8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[7] - vkernel_zero_point;
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[8] - vkernel_zero_point;
const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[9] - vkernel_zero_point;
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) (uint32_t) i5[0];
const int32_t vi5x1 = (int32_t) (uint32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[10] - vkernel_zero_point;
const int32_t vk5x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[11] - vkernel_zero_point;
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
w = (const void*) ((uintptr_t) w + 12 * sizeof(uint8_t));
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = b[0];
const int32_t vi0 = (int32_t) (uint32_t) *i0;
const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) (uint32_t) *i1;
const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) (uint32_t) *i2;
const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) (uint32_t) *i3;
const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) (uint32_t) *i4;
const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) (uint32_t) *i5;
const int32_t vk5 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
vacc += vi5 * vk5;
w = (const void*) ((uintptr_t) w + 6 * sizeof(uint8_t));
*b++ = vacc;
}
}
// Last pass to process up to 7 inputs.
{
const int32_t* b = buffer;
const uint8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
const uint8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
}
const uint8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
}
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
b += 2;
const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[7] - vkernel_zero_point;
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[8] - vkernel_zero_point;
const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[9] - vkernel_zero_point;
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) (uint32_t) i5[0];
const int32_t vi5x1 = (int32_t) (uint32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[10] - vkernel_zero_point;
const int32_t vk5x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[11] - vkernel_zero_point;
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
const int32_t vi6x0 = (int32_t) (uint32_t) i6[0];
const int32_t vi6x1 = (int32_t) (uint32_t) i6[1];
i6 += 2;
const int32_t vk6x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[12] - vkernel_zero_point;
const int32_t vk6x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[13] - vkernel_zero_point;
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
w = (const void*) ((uintptr_t) w + 14 * sizeof(uint8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
vfpacc0 *= vscale;
vfpacc1 *= vscale;
vfpacc0 = __builtin_wasm_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = __builtin_wasm_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc0 = __builtin_wasm_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = __builtin_wasm_min_f32(vfpacc1, voutput_max_less_zero_point);
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
output[0] = (uint8_t) vout0;
output[1] = (uint8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = b[0];
const int32_t vi0 = (int32_t) (uint32_t) *i0;
const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) (uint32_t) *i1;
const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) (uint32_t) *i2;
const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) (uint32_t) *i3;
const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) (uint32_t) *i4;
const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) (uint32_t) *i5;
const int32_t vk5 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) (uint32_t) *i6;
const int32_t vk6 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
vacc += vi6 * vk6;
float vfpacc = (float) vacc * vscale;
vfpacc = __builtin_wasm_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = __builtin_wasm_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (uint8_t) vout;
}
}
input = (const uint8_t**) ((uintptr_t) input + input_stride);
output = (uint8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 19,161
| 38.427984
| 133
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-6f6m7l8c8s8r-minmax-fp32-neon-mul16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-neon-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_qu8_dwconv_minmax_fp32_ukernel_6f6m7l8c8s8r__neon_mul16(
size_t channels,
size_t output_width,
const uint8_t** input,
const void* weights,
uint8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const uint8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 6);
const uint8x8_t vkernel_zero_point = vld1_dup_u8(params->fp32_neon.kernel_zero_point);
const float32x4_t vscale = vld1q_dup_f32(¶ms->fp32_neon.scale);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->fp32_neon.magic_bias);
const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(¶ms->fp32_neon.magic_bias_less_output_zero_point);
const uint8x8_t voutput_min = vld1_dup_u8(¶ms->fp32_neon.output_min);
const uint8x8_t voutput_max = vld1_dup_u8(¶ms->fp32_neon.output_max);
do {
const void* w = weights;
// First pass to process 6 inputs.
{
int32_t* b = buffer;
const uint8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
const uint8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
const int16x8_t vi0x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i0))); i0 += 8;
const int16x8_t vk0x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
const int16x8_t vi1x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i1))); i1 += 8;
const int16x8_t vk1x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
const int16x8_t vi2x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i2))); i2 += 8;
const int16x8_t vk2x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
const int16x8_t vi3x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i3))); i3 += 8;
const int16x8_t vk3x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
const int16x8_t vi4x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i4))); i4 += 8;
const int16x8_t vk4x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
const int16x8_t vi5x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i5))); i5 += 8;
const int16x8_t vk5x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
assert(c == 0);
}
// Middle pass to process 6 inputs in each iteration.
for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
int32_t* b = buffer;
const uint8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
const uint8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
const int16x8_t vi0x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i0))); i0 += 8;
const int16x8_t vk0x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
const int16x8_t vi1x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i1))); i1 += 8;
const int16x8_t vk1x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
const int16x8_t vi2x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i2))); i2 += 8;
const int16x8_t vk2x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
const int16x8_t vi3x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i3))); i3 += 8;
const int16x8_t vk3x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
const int16x8_t vi4x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i4))); i4 += 8;
const int16x8_t vk4x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
const int16x8_t vi5x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i5))); i5 += 8;
const int16x8_t vk5x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
assert(c == 0);
}
// Last pass to process up to 7 inputs.
{
const int32_t* b = buffer;
const uint8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
const uint8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
}
const uint8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int16x8_t vi0x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i0))); i0 += 8;
const int16x8_t vk0x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
const int16x8_t vi1x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i1))); i1 += 8;
const int16x8_t vk1x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
const int16x8_t vi2x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i2))); i2 += 8;
const int16x8_t vk2x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
const int16x8_t vi3x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i3))); i3 += 8;
const int16x8_t vk3x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
const int16x8_t vi4x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i4))); i4 += 8;
const int16x8_t vk4x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
const int16x8_t vi5x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i5))); i5 += 8;
const int16x8_t vk5x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
const int16x8_t vi6x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i6))); i6 += 8;
const int16x8_t vk6x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
#endif // !XNN_ARCH_ARM64
vout01234567 = vmax_u8(vout01234567, voutput_min);
vout01234567 = vmin_u8(vout01234567, voutput_max);
vst1_u8(output, vout01234567); output += 8;
}
if XNN_UNLIKELY(c != 0) {
{
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int16x8_t vi0x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i0)));
const int16x8_t vk0x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
const int16x8_t vi1x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i1)));
const int16x8_t vk1x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
const int16x8_t vi2x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i2)));
const int16x8_t vk2x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
const int16x8_t vi3x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i3)));
const int16x8_t vk3x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
const int16x8_t vi4x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i4)));
const int16x8_t vk4x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
const int16x8_t vi5x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i5)));
const int16x8_t vk5x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
const int16x8_t vi6x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i6)));
const int16x8_t vk6x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
vout01234567 = vmax_u8(vout01234567, voutput_min);
vout01234567 = vmin_u8(vout01234567, voutput_max);
if (c & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
vout01234567 = vext_u8(vout01234567, vout01234567, 4);
}
if (c & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
vout01234567 = vext_u8(vout01234567, vout01234567, 2);
}
if (c & 1) {
vst1_lane_u8(output, vout01234567, 0); output += 1;
}
}
}
}
input = (const uint8_t**) ((uintptr_t) input + input_stride);
output = (uint8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 19,710
| 45.708531
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-6f6m7l8c8s8r-minmax-fp32-neonv8-mul16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-neon-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
// QU8 (asymmetric uint8) depthwise-convolution microkernel with float ("fp32")
// requantization, using ARMv8 NEON round-to-nearest-even conversion (vcvtnq).
//
// Multipass tiling (the "6f6m7l8c8s8r" suffix): a First pass over 6 kernel
// taps, zero or more Middle passes of 6 taps each, and a Last pass of up to
// 7 taps; channels are processed 8 at a time, with int32 partial sums for one
// output row staged in the caller-provided `buffer` between passes.
//
// "mul16" strategy: the uint8 inputs are widened to int16 (vmovl_u8) and the
// uint8 weights are re-biased by the kernel zero point into int16 (vsubl_u8),
// then accumulated into int32 lanes with vmlal_s16.
//
// Arguments:
//   channels         - number of channels (any value; tail handled in last pass)
//   output_width     - number of output pixels to produce
//   input            - array of per-tap input row pointers, advanced by
//                      `input_stride` bytes per output pixel
//   weights          - packed weights: per 8-channel group, 8 int32 biases
//                      followed by 8 uint8 weights per tap
//   output           - output pointer, advanced by `output_increment` extra
//                      bytes after each pixel
//   input_offset     - byte offset applied to every row pointer except `zero`
//   zero             - pointer to a zero vector used for out-of-bounds taps
//   kernel_size      - total number of taps; must exceed the 6 consumed by
//                      the first pass
//   buffer           - scratch for round_up_po2(channels, 8) int32 accumulators
//   params           - fp32_neonv8 requantization parameters
void xnn_qu8_dwconv_minmax_fp32_ukernel_6f6m7l8c8s8r__neonv8_mul16(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);
  assert(kernel_size > 6);  // the first pass unconditionally consumes 6 taps

  // Requantization constants, broadcast across all lanes:
  // acc -> float, * scale, round-to-nearest-even back to int32, + output zero
  // point (saturating, in int16), then clamp to [output_min, output_max].
  const uint8x8_t vkernel_zero_point = vld1_dup_u8(params->fp32_neonv8.kernel_zero_point);
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
  const uint8x8_t voutput_min = vld1_dup_u8(&params->fp32_neonv8.output_min);
  const uint8x8_t voutput_max = vld1_dup_u8(&params->fp32_neonv8.output_max);
  do {
    const void* w = weights;

    // First pass to process 6 inputs.
    {
      int32_t* b = buffer;
      // Row pointers equal to `zero` point at the shared zero vector and must
      // NOT be offset; real rows are shifted by input_offset.
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      const uint8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
      }
      input += 6;

      // Channels are padded up to a multiple of 8; XNN_OOB_READS permits the
      // resulting over-reads past the channel tail.
      size_t c = round_up_po2(channels, 8);
      for (; c >= 8; c -= 8) {
        // Accumulators start from the packed per-channel biases.
        int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
        int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;

        const int16x8_t vi0x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i0))); i0 += 8;
        const int16x8_t vk0x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
        const int16x8_t vi1x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i1))); i1 += 8;
        const int16x8_t vk1x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
        const int16x8_t vi2x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i2))); i2 += 8;
        const int16x8_t vk2x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
        const int16x8_t vi3x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i3))); i3 += 8;
        const int16x8_t vk3x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
        const int16x8_t vi4x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i4))); i4 += 8;
        const int16x8_t vk4x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
        const int16x8_t vi5x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i5))); i5 += 8;
        const int16x8_t vk5x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));

        // Spill partial sums to the scratch buffer for the next pass.
        vst1q_s32(b, vacc0123); b += 4;
        vst1q_s32(b, vacc4567); b += 4;
      }
      assert(c == 0);
    }

    // Middle pass to process 6 inputs in each iteration.
    // Runs while more than 7 taps remain, leaving 2..7 taps for the last pass.
    for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
      int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      const uint8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
      }
      input += 6;

      size_t c = round_up_po2(channels, 8);
      for (; c >= 8; c -= 8) {
        // Resume from the partial sums written by the previous pass; `b` is
        // advanced only by the stores below (read and write windows coincide).
        int32x4_t vacc0123 = vld1q_s32(b);
        int32x4_t vacc4567 = vld1q_s32(b + 4);

        const int16x8_t vi0x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i0))); i0 += 8;
        const int16x8_t vk0x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
        const int16x8_t vi1x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i1))); i1 += 8;
        const int16x8_t vk1x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
        const int16x8_t vi2x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i2))); i2 += 8;
        const int16x8_t vk2x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
        const int16x8_t vi3x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i3))); i3 += 8;
        const int16x8_t vk3x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
        const int16x8_t vi4x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i4))); i4 += 8;
        const int16x8_t vk4x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
        const int16x8_t vi5x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i5))); i5 += 8;
        const int16x8_t vk5x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));

        vst1q_s32(b, vacc0123); b += 4;
        vst1q_s32(b, vacc4567); b += 4;
      }
      assert(c == 0);
    }

    // Last pass to process up to 7 inputs.
    {
      const int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      const uint8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
      }
      const uint8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
      }
      // NOTE: when fewer than 7 taps remain, the caller presumably points the
      // unused trailing rows at `zero` so they contribute nothing — TODO
      // confirm against the dwconv operator setup code.

      size_t c = channels;
      for (; c >= 8; c -= 8) {
        int32x4_t vacc0123 = vld1q_s32(b); b += 4;
        int32x4_t vacc4567 = vld1q_s32(b); b += 4;

        const int16x8_t vi0x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i0))); i0 += 8;
        const int16x8_t vk0x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
        const int16x8_t vi1x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i1))); i1 += 8;
        const int16x8_t vk1x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
        const int16x8_t vi2x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i2))); i2 += 8;
        const int16x8_t vk2x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
        const int16x8_t vi3x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i3))); i3 += 8;
        const int16x8_t vk3x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
        const int16x8_t vi4x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i4))); i4 += 8;
        const int16x8_t vk4x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
        const int16x8_t vi5x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i5))); i5 += 8;
        const int16x8_t vk5x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
        const int16x8_t vi6x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i6))); i6 += 8;
        const int16x8_t vk6x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));

        // fp32 requantization: scale in float, round to nearest-even (vcvtnq),
        // then saturating-narrow to int16 and add the output zero point.
        float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
        float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);

        vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
        vfpacc4567 = vmulq_f32(vfpacc4567, vscale);

        vacc0123 = vcvtnq_s32_f32(vfpacc0123);
        vacc4567 = vcvtnq_s32_f32(vfpacc4567);

        #if XNN_ARCH_ARM64
          // AArch64: single narrowing instruction for both halves.
          int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);

          vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);

          uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
        #else  // !XNN_ARCH_ARM64
          int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));

          vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);

          uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
        #endif  // !XNN_ARCH_ARM64

        // Clamp to the output activation range.
        vout01234567 = vmax_u8(vout01234567, voutput_min);
        vout01234567 = vmin_u8(vout01234567, voutput_max);

        vst1_u8(output, vout01234567); output += 8;
      }
      // Channel tail (1..7 remaining): compute a full 8-lane group (reads may
      // run past the tail — covered by XNN_OOB_READS) and store only c bytes.
      if XNN_UNLIKELY(c != 0) {
        {
          int32x4_t vacc0123 = vld1q_s32(b); b += 4;
          int32x4_t vacc4567 = vld1q_s32(b); b += 4;

          const int16x8_t vi0x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i0)));
          // NOTE: the tail advances `w` via (const int8_t*); byte-count
          // identical to the (const uint8_t*) casts used in the main loops.
          const int16x8_t vk0x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const int8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
          const int16x8_t vi1x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i1)));
          const int16x8_t vk1x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const int8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
          const int16x8_t vi2x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i2)));
          const int16x8_t vk2x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const int8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
          const int16x8_t vi3x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i3)));
          const int16x8_t vk3x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const int8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
          const int16x8_t vi4x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i4)));
          const int16x8_t vk4x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const int8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
          const int16x8_t vi5x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i5)));
          const int16x8_t vk5x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const int8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
          const int16x8_t vi6x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i6)));
          const int16x8_t vk6x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const int8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));

          // Same fp32 requantization sequence as the main loop.
          float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
          float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);

          vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
          vfpacc4567 = vmulq_f32(vfpacc4567, vscale);

          vacc0123 = vcvtnq_s32_f32(vfpacc0123);
          vacc4567 = vcvtnq_s32_f32(vfpacc4567);

          #if XNN_ARCH_ARM64
            int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
          #else
            int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
          #endif
          vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
          uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
          vout01234567 = vmax_u8(vout01234567, voutput_min);
          vout01234567 = vmin_u8(vout01234567, voutput_max);

          // Store the low 1..7 bytes: 4-, 2-, then 1-byte pieces, rotating the
          // vector (vext) after each partial store to expose the next lanes.
          if (c & 4) {
            vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
            vout01234567 = vext_u8(vout01234567, vout01234567, 4);
          }
          if (c & 2) {
            vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
            vout01234567 = vext_u8(vout01234567, vout01234567, 2);
          }
          if (c & 1) {
            vst1_lane_u8(output, vout01234567, 0); output += 1;
          }
        }
      }
    }

    // Advance to the next output pixel: next set of input row pointers and
    // output pointer (already moved past `channels` bytes) plus increment.
    input = (const uint8_t**) ((uintptr_t) input + input_stride);
    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 19,422
| 45.355609
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-6f6m7l8c8s8r-minmax-rndnu-neon-mul16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-neon-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_qu8_dwconv_minmax_rndnu_ukernel_6f6m7l8c8s8r__neon_mul16(
size_t channels,
size_t output_width,
const uint8_t** input,
const void* weights,
uint8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const uint8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 6);
const uint8x8_t vkernel_zero_point = vld1_dup_u8(params->rndnu_neon.kernel_zero_point);
const int32x4_t vright_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
const uint8x8_t voutput_min = vld1_dup_u8(¶ms->rndnu_neon.output_min);
const uint8x8_t voutput_max = vld1_dup_u8(¶ms->rndnu_neon.output_max);
do {
const void* w = weights;
// First pass to process 6 inputs.
{
int32_t* b = buffer;
const uint8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
const uint8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
const int16x8_t vi0x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i0))); i0 += 8;
const int16x8_t vk0x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
const int16x8_t vi1x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i1))); i1 += 8;
const int16x8_t vk1x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
const int16x8_t vi2x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i2))); i2 += 8;
const int16x8_t vk2x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
const int16x8_t vi3x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i3))); i3 += 8;
const int16x8_t vk3x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
const int16x8_t vi4x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i4))); i4 += 8;
const int16x8_t vk4x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
const int16x8_t vi5x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i5))); i5 += 8;
const int16x8_t vk5x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
assert(c == 0);
}
// Middle pass to process 6 inputs in each iteration.
for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
int32_t* b = buffer;
const uint8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
const uint8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
}
input += 6;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
const int16x8_t vi0x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i0))); i0 += 8;
const int16x8_t vk0x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
const int16x8_t vi1x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i1))); i1 += 8;
const int16x8_t vk1x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
const int16x8_t vi2x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i2))); i2 += 8;
const int16x8_t vk2x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
const int16x8_t vi3x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i3))); i3 += 8;
const int16x8_t vk3x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
const int16x8_t vi4x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i4))); i4 += 8;
const int16x8_t vk4x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
const int16x8_t vi5x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i5))); i5 += 8;
const int16x8_t vk5x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
assert(c == 0);
}
// Last pass to process up to 7 inputs.
{
const int32_t* b = buffer;
const uint8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
const uint8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
}
const uint8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int16x8_t vi0x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i0))); i0 += 8;
const int16x8_t vk0x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
const int16x8_t vi1x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i1))); i1 += 8;
const int16x8_t vk1x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
const int16x8_t vi2x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i2))); i2 += 8;
const int16x8_t vk2x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
const int16x8_t vi3x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i3))); i3 += 8;
const int16x8_t vk3x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
const int16x8_t vi4x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i4))); i4 += 8;
const int16x8_t vk4x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
const int16x8_t vi5x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i5))); i5 += 8;
const int16x8_t vk5x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
const int16x8_t vi6x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i6))); i6 += 8;
const int16x8_t vk6x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
vacc0123 = vqshlq_s32(vacc0123, vright_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vright_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
#endif // !XNN_ARCH_ARM64
vout01234567 = vmax_u8(vout01234567, voutput_min);
vout01234567 = vmin_u8(vout01234567, voutput_max);
vst1_u8(output, vout01234567); output += 8;
}
if XNN_UNLIKELY(c != 0) {
{
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int16x8_t vi0x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i0)));
const int16x8_t vk0x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
const int16x8_t vi1x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i1)));
const int16x8_t vk1x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
const int16x8_t vi2x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i2)));
const int16x8_t vk2x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
const int16x8_t vi3x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i3)));
const int16x8_t vk3x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
const int16x8_t vi4x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i4)));
const int16x8_t vk4x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
const int16x8_t vi5x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i5)));
const int16x8_t vk5x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
const int16x8_t vi6x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i6)));
const int16x8_t vk6x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
vacc0123 = vqshlq_s32(vacc0123, vright_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vright_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
vout01234567 = vmax_u8(vout01234567, voutput_min);
vout01234567 = vmin_u8(vout01234567, voutput_max);
if (c & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
vout01234567 = vext_u8(vout01234567, vout01234567, 4);
}
if (c & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
vout01234567 = vext_u8(vout01234567, vout01234567, 2);
}
if (c & 1) {
vst1_lane_u8(output, vout01234567, 0); output += 1;
}
}
}
}
input = (const uint8_t**) ((uintptr_t) input + input_stride);
output = (uint8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 19,635
| 45.752381
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-8f8m9l1c1s1r-minmax-fp32-scalar-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QU8 depthwise-convolution micro-kernel (scalar, fp32 requantization using
// the "fmagic" float-bit trick), multipass variant for kernel_size > 8:
//   - first pass:   8 taps per channel, accumulator seeded with the int32 bias,
//   - middle passes: 8 taps each while more than 9 taps remain,
//   - last pass:    up to 9 taps, then requantize and store one uint8 output.
// Partial int32 accumulators for all channels are staged in 'buffer' between
// passes.  Processes one channel per inner iteration (c1s1r).
//
// Weights layout per channel: [int32 bias][8 x uint8 taps] for the first
// pass, then [8 x uint8 taps] per middle pass, then [9 x uint8 taps] for the
// last pass.  Each kernel byte is un-biased by vkernel_zero_point.
// Input rows equal to 'zero' are padding rows and are not offset by
// input_offset.
void xnn_qu8_dwconv_minmax_fp32_ukernel_8f8m9l1c1s1r__scalar_fmagic(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);
  assert(kernel_size > 8);  // multipass kernel; unipass variants cover <= 8 taps

  const float vscale = params->fp32_scalar_fmagic.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
  const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
  const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
  const int32_t vkernel_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
  do {
    const void* w = weights;

    // First pass to process 8 inputs.
    {
      int32_t* b = buffer;  // per-channel partial accumulators
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      const uint8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
      }
      const uint8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
      }
      const uint8_t* i7 = input[7];
      assert(i7 != NULL);
      if XNN_UNPREDICTABLE(i7 != zero) {
        i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
      }
      input += 8;

      size_t c = channels;
      do {
        // Seed with the per-channel bias stored ahead of the first 8 taps.
        int32_t vacc = unaligned_load_s32(w);

        const int32_t vi0 = (int32_t) (uint32_t) *i0++;
        const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[0] - vkernel_zero_point;
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) (uint32_t) *i1++;
        const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[1] - vkernel_zero_point;
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) (uint32_t) *i2++;
        const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[2] - vkernel_zero_point;
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) (uint32_t) *i3++;
        const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[3] - vkernel_zero_point;
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) (uint32_t) *i4++;
        const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[4] - vkernel_zero_point;
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) (uint32_t) *i5++;
        const int32_t vk5 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[5] - vkernel_zero_point;
        vacc += vi5 * vk5;
        const int32_t vi6 = (int32_t) (uint32_t) *i6++;
        const int32_t vk6 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[6] - vkernel_zero_point;
        vacc += vi6 * vk6;
        const int32_t vi7 = (int32_t) (uint32_t) *i7++;
        const int32_t vk7 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[7] - vkernel_zero_point;
        vacc += vi7 * vk7;

        // Advance past this channel's bias + 8 taps.
        w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 8 * sizeof(uint8_t));

        *b++ = vacc;
      } while (--c != 0);
    }

    // Middle pass to process 8 inputs in each iteration.
    for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
      int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      const uint8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
      }
      const uint8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
      }
      const uint8_t* i7 = input[7];
      assert(i7 != NULL);
      if XNN_UNPREDICTABLE(i7 != zero) {
        i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
      }
      input += 8;

      size_t c = channels;
      do {
        // Resume this channel's partial sum; middle-pass weights carry no bias.
        int32_t vacc = *b;

        const int32_t vi0 = (int32_t) (uint32_t) *i0++;
        const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) (uint32_t) *i1++;
        const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) (uint32_t) *i2++;
        const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) (uint32_t) *i3++;
        const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) (uint32_t) *i4++;
        const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) (uint32_t) *i5++;
        const int32_t vk5 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
        vacc += vi5 * vk5;
        const int32_t vi6 = (int32_t) (uint32_t) *i6++;
        const int32_t vk6 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
        vacc += vi6 * vk6;
        const int32_t vi7 = (int32_t) (uint32_t) *i7++;
        const int32_t vk7 = (int32_t) (uint32_t) ((const uint8_t*) w)[7] - vkernel_zero_point;
        vacc += vi7 * vk7;

        w = (const void*) ((uintptr_t) w + 8 * sizeof(uint8_t));

        *b++ = vacc;
      } while (--c != 0);
    }

    // Last pass to process up to 9 inputs.
    {
      const int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      const uint8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
      }
      const uint8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
      }
      const uint8_t* i7 = input[7];
      assert(i7 != NULL);
      if XNN_UNPREDICTABLE(i7 != zero) {
        i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
      }
      const uint8_t* i8 = input[8];
      assert(i8 != NULL);
      if XNN_UNPREDICTABLE(i8 != zero) {
        i8 = (const uint8_t*) ((uintptr_t) i8 + input_offset);
      }

      size_t c = channels;
      do {
        int32_t vacc = unaligned_load_s32(b++);

        const int32_t vi0 = (int32_t) (uint32_t) *i0++;
        const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) (uint32_t) *i1++;
        const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) (uint32_t) *i2++;
        const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) (uint32_t) *i3++;
        const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) (uint32_t) *i4++;
        const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) (uint32_t) *i5++;
        const int32_t vk5 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
        vacc += vi5 * vk5;
        const int32_t vi6 = (int32_t) (uint32_t) *i6++;
        const int32_t vk6 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
        vacc += vi6 * vk6;
        const int32_t vi7 = (int32_t) (uint32_t) *i7++;
        const int32_t vk7 = (int32_t) (uint32_t) ((const uint8_t*) w)[7] - vkernel_zero_point;
        vacc += vi7 * vk7;
        const int32_t vi8 = (int32_t) (uint32_t) *i8++;
        const int32_t vk8 = (int32_t) (uint32_t) ((const uint8_t*) w)[8] - vkernel_zero_point;
        vacc += vi8 * vk8;

        w = (const void*) ((uintptr_t) w + 9 * sizeof(uint8_t));

        // fp32 requantization ("fmagic"): scale, clamp in float space, then
        // add a magic bias so the rounded integer is recovered directly from
        // the float's bit pattern.
        float vfpacc = (float) vacc * vscale;
        vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
        vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
        vfpacc += vmagic_bias;
        int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
        *output++ = (uint8_t) vout;
      } while (--c != 0);
    }

    input = (const uint8_t**) ((uintptr_t) input + input_stride);
    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 11,951
| 39.242424
| 126
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-8f8m9l1c1s1r-minmax-fp32-scalar-imagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QU8 depthwise-convolution micro-kernel (scalar, fp32 requantization using
// the "imagic" variant: clamping performed in the integer domain after the
// magic-bias conversion), multipass variant for kernel_size > 8:
//   - first pass:   8 taps per channel, accumulator seeded with the int32 bias,
//   - middle passes: 8 taps each while more than 9 taps remain,
//   - last pass:    up to 9 taps, then requantize and store one uint8 output.
// Partial int32 accumulators for all channels are staged in 'buffer' between
// passes.  Processes one channel per inner iteration (c1s1r).
//
// Weights layout per channel: [int32 bias][8 x uint8 taps] for the first
// pass, then [8 x uint8 taps] per middle pass, then [9 x uint8 taps] for the
// last pass.  Each kernel byte is un-biased by vkernel_zero_point.
// Input rows equal to 'zero' are padding rows and are not offset by
// input_offset.
void xnn_qu8_dwconv_minmax_fp32_ukernel_8f8m9l1c1s1r__scalar_imagic(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);
  assert(kernel_size > 8);  // multipass kernel; unipass variants cover <= 8 taps

  const float vscale = params->fp32_scalar_imagic.scale;
  const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
  const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
  const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
  const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
  const int32_t vkernel_zero_point = params->fp32_scalar_imagic.kernel_zero_point;
  do {
    const void* w = weights;

    // First pass to process 8 inputs.
    {
      int32_t* b = buffer;  // per-channel partial accumulators
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      const uint8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
      }
      const uint8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
      }
      const uint8_t* i7 = input[7];
      assert(i7 != NULL);
      if XNN_UNPREDICTABLE(i7 != zero) {
        i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
      }
      input += 8;

      size_t c = channels;
      do {
        // Seed with the per-channel bias stored ahead of the first 8 taps.
        int32_t vacc = unaligned_load_s32(w);

        const int32_t vi0 = (int32_t) (uint32_t) *i0++;
        const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[0] - vkernel_zero_point;
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) (uint32_t) *i1++;
        const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[1] - vkernel_zero_point;
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) (uint32_t) *i2++;
        const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[2] - vkernel_zero_point;
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) (uint32_t) *i3++;
        const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[3] - vkernel_zero_point;
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) (uint32_t) *i4++;
        const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[4] - vkernel_zero_point;
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) (uint32_t) *i5++;
        const int32_t vk5 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[5] - vkernel_zero_point;
        vacc += vi5 * vk5;
        const int32_t vi6 = (int32_t) (uint32_t) *i6++;
        const int32_t vk6 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[6] - vkernel_zero_point;
        vacc += vi6 * vk6;
        const int32_t vi7 = (int32_t) (uint32_t) *i7++;
        const int32_t vk7 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[7] - vkernel_zero_point;
        vacc += vi7 * vk7;

        // Advance past this channel's bias + 8 taps.
        w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 8 * sizeof(uint8_t));

        *b++ = vacc;
      } while (--c != 0);
    }

    // Middle pass to process 8 inputs in each iteration.
    for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
      int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      const uint8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
      }
      const uint8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
      }
      const uint8_t* i7 = input[7];
      assert(i7 != NULL);
      if XNN_UNPREDICTABLE(i7 != zero) {
        i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
      }
      input += 8;

      size_t c = channels;
      do {
        // Resume this channel's partial sum; middle-pass weights carry no bias.
        int32_t vacc = *b;

        const int32_t vi0 = (int32_t) (uint32_t) *i0++;
        const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) (uint32_t) *i1++;
        const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) (uint32_t) *i2++;
        const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) (uint32_t) *i3++;
        const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) (uint32_t) *i4++;
        const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) (uint32_t) *i5++;
        const int32_t vk5 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
        vacc += vi5 * vk5;
        const int32_t vi6 = (int32_t) (uint32_t) *i6++;
        const int32_t vk6 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
        vacc += vi6 * vk6;
        const int32_t vi7 = (int32_t) (uint32_t) *i7++;
        const int32_t vk7 = (int32_t) (uint32_t) ((const uint8_t*) w)[7] - vkernel_zero_point;
        vacc += vi7 * vk7;

        w = (const void*) ((uintptr_t) w + 8 * sizeof(uint8_t));

        *b++ = vacc;
      } while (--c != 0);
    }

    // Last pass to process up to 9 inputs.
    {
      const int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      const uint8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
      }
      const uint8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
      }
      const uint8_t* i7 = input[7];
      assert(i7 != NULL);
      if XNN_UNPREDICTABLE(i7 != zero) {
        i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
      }
      const uint8_t* i8 = input[8];
      assert(i8 != NULL);
      if XNN_UNPREDICTABLE(i8 != zero) {
        i8 = (const uint8_t*) ((uintptr_t) i8 + input_offset);
      }

      size_t c = channels;
      do {
        int32_t vacc = unaligned_load_s32(b++);

        const int32_t vi0 = (int32_t) (uint32_t) *i0++;
        const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) (uint32_t) *i1++;
        const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) (uint32_t) *i2++;
        const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) (uint32_t) *i3++;
        const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) (uint32_t) *i4++;
        const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) (uint32_t) *i5++;
        const int32_t vk5 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
        vacc += vi5 * vk5;
        const int32_t vi6 = (int32_t) (uint32_t) *i6++;
        const int32_t vk6 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
        vacc += vi6 * vk6;
        const int32_t vi7 = (int32_t) (uint32_t) *i7++;
        const int32_t vk7 = (int32_t) (uint32_t) ((const uint8_t*) w)[7] - vkernel_zero_point;
        vacc += vi7 * vk7;
        const int32_t vi8 = (int32_t) (uint32_t) *i8++;
        const int32_t vk8 = (int32_t) (uint32_t) ((const uint8_t*) w)[8] - vkernel_zero_point;
        vacc += vi8 * vk8;

        w = (const void*) ((uintptr_t) w + 9 * sizeof(uint8_t));

        // fp32 requantization ("imagic"): scale and add the magic bias, read
        // the integer from the float bits, then clamp and shift to the output
        // zero point entirely in the integer domain.
        float vfpacc = (float) vacc * vscale;
        vfpacc += vmagic_bias;
        int32_t vout = (int32_t) float_as_uint32(vfpacc);
        vout = math_max_s32(vout, vmagic_min);
        vout = math_min_s32(vout, vmagic_max);
        vout -= vmagic_bias_less_zero_point;
        *output++ = (uint8_t) vout;
      } while (--c != 0);
    }

    input = (const uint8_t**) ((uintptr_t) input + input_stride);
    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 11,839
| 38.731544
| 126
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-8f8m9l1c1s1r-minmax-fp32-scalar-lrintf.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_dwconv_minmax_fp32_ukernel_8f8m9l1c1s1r__scalar_lrintf(
size_t channels,
size_t output_width,
const uint8_t** input,
const void* weights,
uint8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const uint8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 8);
const float vscale = params->fp32_scalar_lrintf.scale;
const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
const int32_t vkernel_zero_point = params->fp32_scalar_lrintf.kernel_zero_point;
do {
const void* w = weights;
// First pass to process 8 inputs.
{
int32_t* b = buffer;
const uint8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
const uint8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
}
const uint8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
}
const uint8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = channels;
do {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) (uint32_t) *i0++;
const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[0] - vkernel_zero_point;
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) (uint32_t) *i1++;
const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[1] - vkernel_zero_point;
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) (uint32_t) *i2++;
const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[2] - vkernel_zero_point;
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) (uint32_t) *i3++;
const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[3] - vkernel_zero_point;
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) (uint32_t) *i4++;
const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[4] - vkernel_zero_point;
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) (uint32_t) *i5++;
const int32_t vk5 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[5] - vkernel_zero_point;
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) (uint32_t) *i6++;
const int32_t vk6 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[6] - vkernel_zero_point;
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) (uint32_t) *i7++;
const int32_t vk7 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[7] - vkernel_zero_point;
vacc += vi7 * vk7;
w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 8 * sizeof(uint8_t));
*b++ = vacc;
} while (--c != 0);
}
// Middle pass to process 8 inputs in each iteration.
for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
int32_t* b = buffer;
const uint8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
const uint8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
}
const uint8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
}
const uint8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = channels;
do {
int32_t vacc = *b;
const int32_t vi0 = (int32_t) (uint32_t) *i0++;
const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) (uint32_t) *i1++;
const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) (uint32_t) *i2++;
const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) (uint32_t) *i3++;
const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) (uint32_t) *i4++;
const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) (uint32_t) *i5++;
const int32_t vk5 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) (uint32_t) *i6++;
const int32_t vk6 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) (uint32_t) *i7++;
const int32_t vk7 = (int32_t) (uint32_t) ((const uint8_t*) w)[7] - vkernel_zero_point;
vacc += vi7 * vk7;
w = (const void*) ((uintptr_t) w + 8 * sizeof(uint8_t));
*b++ = vacc;
} while (--c != 0);
}
// Last pass to process up to 9 inputs.
{
const int32_t* b = buffer;
const uint8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
const uint8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
}
const uint8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
}
const uint8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
}
const uint8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const uint8_t*) ((uintptr_t) i8 + input_offset);
}
size_t c = channels;
do {
int32_t vacc = unaligned_load_s32(b++);
const int32_t vi0 = (int32_t) (uint32_t) *i0++;
const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) (uint32_t) *i1++;
const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) (uint32_t) *i2++;
const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) (uint32_t) *i3++;
const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) (uint32_t) *i4++;
const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) (uint32_t) *i5++;
const int32_t vk5 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) (uint32_t) *i6++;
const int32_t vk6 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) (uint32_t) *i7++;
const int32_t vk7 = (int32_t) (uint32_t) ((const uint8_t*) w)[7] - vkernel_zero_point;
vacc += vi7 * vk7;
const int32_t vi8 = (int32_t) (uint32_t) *i8++;
const int32_t vk8 = (int32_t) (uint32_t) ((const uint8_t*) w)[8] - vkernel_zero_point;
vacc += vi8 * vk8;
w = (const void*) ((uintptr_t) w + 9 * sizeof(uint8_t));
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
const int32_t vrndacc = (int32_t) lrintf(vfpacc);
int32_t vout = vrndacc + voutput_zero_point;
*output++ = (uint8_t) vout;
} while (--c != 0);
}
input = (const uint8_t**) ((uintptr_t) input + input_stride);
output = (uint8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 11,855
| 38.919192
| 126
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-8f8m9l1c1s1r-minmax-fp32-wasm-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QU8 depthwise-convolution microkernel for kernels with more than 8 taps.
// The kernel is processed in multiple passes over an int32 scratch buffer:
//   - first pass: 8 taps; seeds the accumulator from the per-channel int32 bias
//     that precedes each channel's taps in the packed weights,
//   - middle passes: 8 taps each; accumulate into the buffer,
//   - last pass: the remaining (up to 9) taps; finishes accumulation and
//     requantizes with the fp32 "fmagic" (magic-bias) scheme, clamping via
//     WASM f32 min/max builtins.
// Variant 8f8m9l1c1s1r: 8 taps first/middle, up to 9 last, 1 channel per step.
//
// input        - array of row pointers, one per kernel tap per output pixel
// input_offset - byte offset added to every non-`zero` row pointer
// zero         - pointer to a zero-filled padding row (never offset)
// buffer       - int32 scratch, at least `channels` elements
void xnn_qu8_dwconv_minmax_fp32_ukernel_8f8m9l1c1s1r__wasm_fmagic(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);
  // Kernels with <= 8 taps are served by single-pass microkernels.
  assert(kernel_size > 8);
  const float vscale = params->fp32_scalar_fmagic.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
  const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
  const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
  const int32_t vkernel_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
  do {
    const void* w = weights;
    // First pass to process 8 inputs.
    {
      int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      // Only real input rows get the offset; the shared `zero` padding row
      // must be read from its actual address.
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      const uint8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
      }
      const uint8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
      }
      const uint8_t* i7 = input[7];
      assert(i7 != NULL);
      if XNN_UNPREDICTABLE(i7 != zero) {
        i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
      }
      input += 8;
      size_t c = channels;
      do {
        // Per-channel weights layout in this pass: int32 bias, then 8 uint8 taps.
        int32_t vacc = unaligned_load_s32(w);
        const int32_t vi0 = (int32_t) (uint32_t) *i0++;
        const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[0] - vkernel_zero_point;
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) (uint32_t) *i1++;
        const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[1] - vkernel_zero_point;
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) (uint32_t) *i2++;
        const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[2] - vkernel_zero_point;
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) (uint32_t) *i3++;
        const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[3] - vkernel_zero_point;
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) (uint32_t) *i4++;
        const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[4] - vkernel_zero_point;
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) (uint32_t) *i5++;
        const int32_t vk5 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[5] - vkernel_zero_point;
        vacc += vi5 * vk5;
        const int32_t vi6 = (int32_t) (uint32_t) *i6++;
        const int32_t vk6 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[6] - vkernel_zero_point;
        vacc += vi6 * vk6;
        const int32_t vi7 = (int32_t) (uint32_t) *i7++;
        const int32_t vk7 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + sizeof(int32_t)))[7] - vkernel_zero_point;
        vacc += vi7 * vk7;
        w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 8 * sizeof(uint8_t));
        // Stash the partial sum; later passes keep accumulating into it.
        *b++ = vacc;
      } while (--c != 0);
    }
    // Middle pass to process 8 inputs in each iteration.
    for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
      int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      const uint8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
      }
      const uint8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
      }
      const uint8_t* i7 = input[7];
      assert(i7 != NULL);
      if XNN_UNPREDICTABLE(i7 != zero) {
        i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
      }
      input += 8;
      size_t c = channels;
      do {
        // Resume from the partial sum of the previous pass; weights here are
        // taps only (no bias prefix).
        int32_t vacc = *b;
        const int32_t vi0 = (int32_t) (uint32_t) *i0++;
        const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) (uint32_t) *i1++;
        const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) (uint32_t) *i2++;
        const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) (uint32_t) *i3++;
        const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) (uint32_t) *i4++;
        const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) (uint32_t) *i5++;
        const int32_t vk5 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
        vacc += vi5 * vk5;
        const int32_t vi6 = (int32_t) (uint32_t) *i6++;
        const int32_t vk6 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
        vacc += vi6 * vk6;
        const int32_t vi7 = (int32_t) (uint32_t) *i7++;
        const int32_t vk7 = (int32_t) (uint32_t) ((const uint8_t*) w)[7] - vkernel_zero_point;
        vacc += vi7 * vk7;
        w = (const void*) ((uintptr_t) w + 8 * sizeof(uint8_t));
        *b++ = vacc;
      } while (--c != 0);
    }
    // Last pass to process up to 9 inputs.
    {
      const int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      const uint8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
      }
      const uint8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
      }
      const uint8_t* i7 = input[7];
      assert(i7 != NULL);
      if XNN_UNPREDICTABLE(i7 != zero) {
        i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
      }
      const uint8_t* i8 = input[8];
      assert(i8 != NULL);
      if XNN_UNPREDICTABLE(i8 != zero) {
        i8 = (const uint8_t*) ((uintptr_t) i8 + input_offset);
      }
      size_t c = channels;
      do {
        int32_t vacc = unaligned_load_s32(b++);
        const int32_t vi0 = (int32_t) (uint32_t) *i0++;
        const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) (uint32_t) *i1++;
        const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) (uint32_t) *i2++;
        const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) (uint32_t) *i3++;
        const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) (uint32_t) *i4++;
        const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) (uint32_t) *i5++;
        const int32_t vk5 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
        vacc += vi5 * vk5;
        const int32_t vi6 = (int32_t) (uint32_t) *i6++;
        const int32_t vk6 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
        vacc += vi6 * vk6;
        const int32_t vi7 = (int32_t) (uint32_t) *i7++;
        const int32_t vk7 = (int32_t) (uint32_t) ((const uint8_t*) w)[7] - vkernel_zero_point;
        vacc += vi7 * vk7;
        const int32_t vi8 = (int32_t) (uint32_t) *i8++;
        const int32_t vk8 = (int32_t) (uint32_t) ((const uint8_t*) w)[8] - vkernel_zero_point;
        vacc += vi8 * vk8;
        w = (const void*) ((uintptr_t) w + 9 * sizeof(uint8_t));
        // Requantize: scale to float, clamp (zero point still subtracted out)...
        float vfpacc = (float) vacc * vscale;
        vfpacc = __builtin_wasm_max_f32(vfpacc, voutput_min_less_zero_point);
        vfpacc = __builtin_wasm_min_f32(vfpacc, voutput_max_less_zero_point);
        // ...then round via the magic-bias trick: adding vmagic_bias aligns the
        // integer part into the low mantissa bits, so reinterpreting the float
        // as bits and subtracting (magic bias bits - output zero point) yields
        // the rounded, zero-point-adjusted quantized value.
        vfpacc += vmagic_bias;
        int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
        *output++ = (uint8_t) vout;
      } while (--c != 0);
    }
    input = (const uint8_t**) ((uintptr_t) input + input_stride);
    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 11,969
| 39.30303
| 126
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-8f8m9l2c1s1r-minmax-fp32-scalar-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QU8 depthwise-convolution microkernel for kernels with more than 8 taps,
// processed in multiple passes over an int32 scratch buffer:
//   - first pass: 8 taps; seeds accumulators from the per-channel int32 bias
//     stored ahead of each channel group's taps in the packed weights,
//   - middle passes: 8 taps each; accumulate into the buffer,
//   - last pass: the remaining (up to 9) taps; finishes accumulation and
//     requantizes with the fp32 "fmagic" (magic-bias) scalar scheme.
// Variant 8f8m9l2c1s1r: channel loops are unrolled by 2, with a 1-channel
// remainder path.
//
// input        - array of row pointers, one per kernel tap per output pixel
// input_offset - byte offset added to every non-`zero` row pointer
// zero         - pointer to a zero-filled padding row (never offset)
// buffer       - int32 scratch, at least `channels` elements
void xnn_qu8_dwconv_minmax_fp32_ukernel_8f8m9l2c1s1r__scalar_fmagic(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);
  // Kernels with <= 8 taps are served by single-pass microkernels.
  assert(kernel_size > 8);
  const float vscale = params->fp32_scalar_fmagic.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
  const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
  const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
  const int32_t vkernel_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
  do {
    const void* w = weights;
    // First pass to process 8 inputs.
    {
      int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      // Only real input rows get the offset; the shared `zero` padding row
      // must be read from its actual address.
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      const uint8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
      }
      const uint8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
      }
      const uint8_t* i7 = input[7];
      assert(i7 != NULL);
      if XNN_UNPREDICTABLE(i7 != zero) {
        i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
      }
      input += 8;
      size_t c = channels;
      // Main loop: 2 channels per iteration. Weights layout per channel pair:
      // two int32 biases, then 8 interleaved uint8 tap pairs (16 bytes).
      for (; c >= 2; c -= 2) {
        int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
        int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
        const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
        const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
        i0 += 2;
        const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0] - vkernel_zero_point;
        const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1] - vkernel_zero_point;
        vacc0 += vi0x0 * vk0x0;
        vacc1 += vi0x1 * vk0x1;
        const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
        const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
        i1 += 2;
        const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2] - vkernel_zero_point;
        const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3] - vkernel_zero_point;
        vacc0 += vi1x0 * vk1x0;
        vacc1 += vi1x1 * vk1x1;
        const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
        const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
        i2 += 2;
        const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4] - vkernel_zero_point;
        const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5] - vkernel_zero_point;
        vacc0 += vi2x0 * vk2x0;
        vacc1 += vi2x1 * vk2x1;
        const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
        const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
        i3 += 2;
        const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6] - vkernel_zero_point;
        const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7] - vkernel_zero_point;
        vacc0 += vi3x0 * vk3x0;
        vacc1 += vi3x1 * vk3x1;
        const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
        const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
        i4 += 2;
        const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8] - vkernel_zero_point;
        const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9] - vkernel_zero_point;
        vacc0 += vi4x0 * vk4x0;
        vacc1 += vi4x1 * vk4x1;
        const int32_t vi5x0 = (int32_t) (uint32_t) i5[0];
        const int32_t vi5x1 = (int32_t) (uint32_t) i5[1];
        i5 += 2;
        const int32_t vk5x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10] - vkernel_zero_point;
        const int32_t vk5x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[11] - vkernel_zero_point;
        vacc0 += vi5x0 * vk5x0;
        vacc1 += vi5x1 * vk5x1;
        const int32_t vi6x0 = (int32_t) (uint32_t) i6[0];
        const int32_t vi6x1 = (int32_t) (uint32_t) i6[1];
        i6 += 2;
        const int32_t vk6x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12] - vkernel_zero_point;
        const int32_t vk6x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[13] - vkernel_zero_point;
        vacc0 += vi6x0 * vk6x0;
        vacc1 += vi6x1 * vk6x1;
        const int32_t vi7x0 = (int32_t) (uint32_t) i7[0];
        const int32_t vi7x1 = (int32_t) (uint32_t) i7[1];
        i7 += 2;
        const int32_t vk7x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14] - vkernel_zero_point;
        const int32_t vk7x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[15] - vkernel_zero_point;
        vacc0 += vi7x0 * vk7x0;
        vacc1 += vi7x1 * vk7x1;
        w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 16 * sizeof(uint8_t));
        // Stash partial sums; later passes keep accumulating into them.
        b[0] = vacc0;
        b[1] = vacc1;
        b += 2;
      }
      // Remainder: a single trailing channel (input pointers need no advance).
      if XNN_UNLIKELY(c != 0) {
        int32_t vacc = unaligned_load_s32(w);
        const int32_t vi0 = (int32_t) (uint32_t) *i0;
        const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0] - vkernel_zero_point;
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) (uint32_t) *i1;
        const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1] - vkernel_zero_point;
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) (uint32_t) *i2;
        const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2] - vkernel_zero_point;
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) (uint32_t) *i3;
        const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3] - vkernel_zero_point;
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) (uint32_t) *i4;
        const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4] - vkernel_zero_point;
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) (uint32_t) *i5;
        const int32_t vk5 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[5] - vkernel_zero_point;
        vacc += vi5 * vk5;
        const int32_t vi6 = (int32_t) (uint32_t) *i6;
        const int32_t vk6 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[6] - vkernel_zero_point;
        vacc += vi6 * vk6;
        const int32_t vi7 = (int32_t) (uint32_t) *i7;
        const int32_t vk7 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[7] - vkernel_zero_point;
        vacc += vi7 * vk7;
        w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 8 * sizeof(uint8_t));
        *b++ = vacc;
      }
    }
    // Middle pass to process 8 inputs in each iteration.
    for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
      int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      const uint8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
      }
      const uint8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
      }
      const uint8_t* i7 = input[7];
      assert(i7 != NULL);
      if XNN_UNPREDICTABLE(i7 != zero) {
        i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
      }
      input += 8;
      size_t c = channels;
      // Main loop: 2 channels per iteration; weights here are taps only
      // (no bias prefix), 16 bytes per channel pair.
      for (; c >= 2; c -= 2) {
        int32_t vacc0 = b[0];
        int32_t vacc1 = b[1];
        const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
        const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
        i0 += 2;
        const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
        const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
        vacc0 += vi0x0 * vk0x0;
        vacc1 += vi0x1 * vk0x1;
        const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
        const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
        i1 += 2;
        const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
        const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
        vacc0 += vi1x0 * vk1x0;
        vacc1 += vi1x1 * vk1x1;
        const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
        const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
        i2 += 2;
        const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
        const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
        vacc0 += vi2x0 * vk2x0;
        vacc1 += vi2x1 * vk2x1;
        const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
        const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
        i3 += 2;
        const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
        const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[7] - vkernel_zero_point;
        vacc0 += vi3x0 * vk3x0;
        vacc1 += vi3x1 * vk3x1;
        const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
        const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
        i4 += 2;
        const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[8] - vkernel_zero_point;
        const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[9] - vkernel_zero_point;
        vacc0 += vi4x0 * vk4x0;
        vacc1 += vi4x1 * vk4x1;
        const int32_t vi5x0 = (int32_t) (uint32_t) i5[0];
        const int32_t vi5x1 = (int32_t) (uint32_t) i5[1];
        i5 += 2;
        const int32_t vk5x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[10] - vkernel_zero_point;
        const int32_t vk5x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[11] - vkernel_zero_point;
        vacc0 += vi5x0 * vk5x0;
        vacc1 += vi5x1 * vk5x1;
        const int32_t vi6x0 = (int32_t) (uint32_t) i6[0];
        const int32_t vi6x1 = (int32_t) (uint32_t) i6[1];
        i6 += 2;
        const int32_t vk6x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[12] - vkernel_zero_point;
        const int32_t vk6x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[13] - vkernel_zero_point;
        vacc0 += vi6x0 * vk6x0;
        vacc1 += vi6x1 * vk6x1;
        const int32_t vi7x0 = (int32_t) (uint32_t) i7[0];
        const int32_t vi7x1 = (int32_t) (uint32_t) i7[1];
        i7 += 2;
        const int32_t vk7x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[14] - vkernel_zero_point;
        const int32_t vk7x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[15] - vkernel_zero_point;
        vacc0 += vi7x0 * vk7x0;
        vacc1 += vi7x1 * vk7x1;
        w = (const void*) ((uintptr_t) w + 16 * sizeof(uint8_t));
        b[0] = vacc0;
        b[1] = vacc1;
        b += 2;
      }
      // Remainder: a single trailing channel.
      if XNN_UNLIKELY(c != 0) {
        int32_t vacc = b[0];
        const int32_t vi0 = (int32_t) (uint32_t) *i0;
        const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) (uint32_t) *i1;
        const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) (uint32_t) *i2;
        const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) (uint32_t) *i3;
        const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) (uint32_t) *i4;
        const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) (uint32_t) *i5;
        const int32_t vk5 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
        vacc += vi5 * vk5;
        const int32_t vi6 = (int32_t) (uint32_t) *i6;
        const int32_t vk6 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
        vacc += vi6 * vk6;
        const int32_t vi7 = (int32_t) (uint32_t) *i7;
        const int32_t vk7 = (int32_t) (uint32_t) ((const uint8_t*) w)[7] - vkernel_zero_point;
        vacc += vi7 * vk7;
        w = (const void*) ((uintptr_t) w + 8 * sizeof(uint8_t));
        *b++ = vacc;
      }
    }
    // Last pass to process up to 9 inputs.
    {
      const int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      const uint8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
      }
      const uint8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
      }
      const uint8_t* i7 = input[7];
      assert(i7 != NULL);
      if XNN_UNPREDICTABLE(i7 != zero) {
        i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
      }
      const uint8_t* i8 = input[8];
      assert(i8 != NULL);
      if XNN_UNPREDICTABLE(i8 != zero) {
        i8 = (const uint8_t*) ((uintptr_t) i8 + input_offset);
      }
      size_t c = channels;
      // Main loop: 2 channels per iteration; 9 taps per channel, 18 weight
      // bytes per channel pair, followed by fmagic requantization.
      for (; c >= 2; c -= 2) {
        int32_t vacc0 = b[0];
        int32_t vacc1 = b[1];
        b += 2;
        const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
        const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
        i0 += 2;
        const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
        const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
        vacc0 += vi0x0 * vk0x0;
        vacc1 += vi0x1 * vk0x1;
        const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
        const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
        i1 += 2;
        const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
        const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
        vacc0 += vi1x0 * vk1x0;
        vacc1 += vi1x1 * vk1x1;
        const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
        const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
        i2 += 2;
        const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
        const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
        vacc0 += vi2x0 * vk2x0;
        vacc1 += vi2x1 * vk2x1;
        const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
        const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
        i3 += 2;
        const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
        const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[7] - vkernel_zero_point;
        vacc0 += vi3x0 * vk3x0;
        vacc1 += vi3x1 * vk3x1;
        const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
        const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
        i4 += 2;
        const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[8] - vkernel_zero_point;
        const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[9] - vkernel_zero_point;
        vacc0 += vi4x0 * vk4x0;
        vacc1 += vi4x1 * vk4x1;
        const int32_t vi5x0 = (int32_t) (uint32_t) i5[0];
        const int32_t vi5x1 = (int32_t) (uint32_t) i5[1];
        i5 += 2;
        const int32_t vk5x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[10] - vkernel_zero_point;
        const int32_t vk5x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[11] - vkernel_zero_point;
        vacc0 += vi5x0 * vk5x0;
        vacc1 += vi5x1 * vk5x1;
        const int32_t vi6x0 = (int32_t) (uint32_t) i6[0];
        const int32_t vi6x1 = (int32_t) (uint32_t) i6[1];
        i6 += 2;
        const int32_t vk6x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[12] - vkernel_zero_point;
        const int32_t vk6x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[13] - vkernel_zero_point;
        vacc0 += vi6x0 * vk6x0;
        vacc1 += vi6x1 * vk6x1;
        const int32_t vi7x0 = (int32_t) (uint32_t) i7[0];
        const int32_t vi7x1 = (int32_t) (uint32_t) i7[1];
        i7 += 2;
        const int32_t vk7x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[14] - vkernel_zero_point;
        const int32_t vk7x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[15] - vkernel_zero_point;
        vacc0 += vi7x0 * vk7x0;
        vacc1 += vi7x1 * vk7x1;
        const int32_t vi8x0 = (int32_t) (uint32_t) i8[0];
        const int32_t vi8x1 = (int32_t) (uint32_t) i8[1];
        i8 += 2;
        const int32_t vk8x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[16] - vkernel_zero_point;
        const int32_t vk8x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[17] - vkernel_zero_point;
        vacc0 += vi8x0 * vk8x0;
        vacc1 += vi8x1 * vk8x1;
        w = (const void*) ((uintptr_t) w + 18 * sizeof(uint8_t));
        // Requantize: scale to float, clamp (zero point still subtracted out)...
        float vfpacc0 = (float) vacc0;
        float vfpacc1 = (float) vacc1;
        vfpacc0 *= vscale;
        vfpacc1 *= vscale;
        vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
        vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
        vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
        vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
        // ...then round via the magic-bias trick: adding vmagic_bias aligns the
        // integer part into the low mantissa bits, so reinterpreting the float
        // as bits and subtracting (magic bias bits - output zero point) yields
        // the rounded, zero-point-adjusted quantized value.
        vfpacc0 += vmagic_bias;
        vfpacc1 += vmagic_bias;
        int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
        int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
        output[0] = (uint8_t) vout0;
        output[1] = (uint8_t) vout1;
        output += 2;
      }
      // Remainder: a single trailing channel (w needs no final advance).
      if XNN_UNLIKELY(c != 0) {
        int32_t vacc = b[0];
        const int32_t vi0 = (int32_t) (uint32_t) *i0;
        const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) (uint32_t) *i1;
        const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) (uint32_t) *i2;
        const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) (uint32_t) *i3;
        const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) (uint32_t) *i4;
        const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) (uint32_t) *i5;
        const int32_t vk5 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
        vacc += vi5 * vk5;
        const int32_t vi6 = (int32_t) (uint32_t) *i6;
        const int32_t vk6 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
        vacc += vi6 * vk6;
        const int32_t vi7 = (int32_t) (uint32_t) *i7;
        const int32_t vk7 = (int32_t) (uint32_t) ((const uint8_t*) w)[7] - vkernel_zero_point;
        vacc += vi7 * vk7;
        const int32_t vi8 = (int32_t) (uint32_t) *i8;
        const int32_t vk8 = (int32_t) (uint32_t) ((const uint8_t*) w)[8] - vkernel_zero_point;
        vacc += vi8 * vk8;
        float vfpacc = (float) vacc * vscale;
        vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
        vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
        vfpacc += vmagic_bias;
        int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
        *output++ = (uint8_t) vout;
      }
    }
    input = (const uint8_t**) ((uintptr_t) input + input_stride);
    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 23,795
| 39.060606
| 133
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-8f8m9l2c1s1r-minmax-fp32-scalar-imagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QU8 (unsigned 8-bit) depthwise convolution, multipass scalar microkernel with
// fp32 requantization using the integer "magic bias" (imagic) rounding trick.
//
// Pass structure (8f8m9l2c1s1r, as implemented below):
//   - a first pass consumes 8 kernel taps per channel, loads the int32 biases
//     from the packed weights, and writes int32 partial sums into `buffer`;
//   - zero or more middle passes each fold 8 more taps into `buffer`;
//   - a last pass folds in the final (up to 9) taps and requantizes to uint8.
// The channel loop handles 2 channels per iteration with a 1-channel remainder.
//
// Packed weight layout, as consumed here: per 2-channel group, the first pass
// reads 2 int32 biases followed by 16 uint8 kernel bytes; middle and last
// passes read raw uint8 kernel bytes only (biases were consumed up front).
void xnn_qu8_dwconv_minmax_fp32_ukernel_8f8m9l2c1s1r__scalar_imagic(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);
  // The first pass alone covers 8 taps; the multipass path requires more.
  assert(kernel_size > 8);

  // Requantization parameters for the "imagic" scheme: scale the int32
  // accumulator in fp32, add a magic bias so the rounded integer lands in the
  // low mantissa bits, clamp in the integer domain against vmagic_min/max,
  // then subtract (magic bias - output zero point) to recover the uint8 value.
  const float vscale = params->fp32_scalar_imagic.scale;
  const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
  const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
  const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
  const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
  const int32_t vkernel_zero_point = params->fp32_scalar_imagic.kernel_zero_point;
  do {
    const void* w = weights;

    // First pass to process 8 inputs.
    {
      int32_t* b = buffer;  // int32 partial accumulators, carried across passes
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      // Rows that point at the shared zero buffer are not offset.
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      const uint8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
      }
      const uint8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
      }
      const uint8_t* i7 = input[7];
      assert(i7 != NULL);
      if XNN_UNPREDICTABLE(i7 != zero) {
        i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
      }
      input += 8;

      // Main channel loop: 2 channels per iteration. Biases come first in the
      // packed weights; kernel bytes follow at offset 2 * sizeof(int32_t).
      size_t c = channels;
      for (; c >= 2; c -= 2) {
        int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
        int32_t vacc1 = unaligned_indexed_load_s32(w, 1);

        const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
        const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
        i0 += 2;
        const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0] - vkernel_zero_point;
        const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1] - vkernel_zero_point;
        vacc0 += vi0x0 * vk0x0;
        vacc1 += vi0x1 * vk0x1;
        const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
        const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
        i1 += 2;
        const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2] - vkernel_zero_point;
        const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3] - vkernel_zero_point;
        vacc0 += vi1x0 * vk1x0;
        vacc1 += vi1x1 * vk1x1;
        const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
        const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
        i2 += 2;
        const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4] - vkernel_zero_point;
        const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5] - vkernel_zero_point;
        vacc0 += vi2x0 * vk2x0;
        vacc1 += vi2x1 * vk2x1;
        const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
        const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
        i3 += 2;
        const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6] - vkernel_zero_point;
        const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7] - vkernel_zero_point;
        vacc0 += vi3x0 * vk3x0;
        vacc1 += vi3x1 * vk3x1;
        const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
        const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
        i4 += 2;
        const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8] - vkernel_zero_point;
        const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9] - vkernel_zero_point;
        vacc0 += vi4x0 * vk4x0;
        vacc1 += vi4x1 * vk4x1;
        const int32_t vi5x0 = (int32_t) (uint32_t) i5[0];
        const int32_t vi5x1 = (int32_t) (uint32_t) i5[1];
        i5 += 2;
        const int32_t vk5x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10] - vkernel_zero_point;
        const int32_t vk5x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[11] - vkernel_zero_point;
        vacc0 += vi5x0 * vk5x0;
        vacc1 += vi5x1 * vk5x1;
        const int32_t vi6x0 = (int32_t) (uint32_t) i6[0];
        const int32_t vi6x1 = (int32_t) (uint32_t) i6[1];
        i6 += 2;
        const int32_t vk6x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12] - vkernel_zero_point;
        const int32_t vk6x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[13] - vkernel_zero_point;
        vacc0 += vi6x0 * vk6x0;
        vacc1 += vi6x1 * vk6x1;
        const int32_t vi7x0 = (int32_t) (uint32_t) i7[0];
        const int32_t vi7x1 = (int32_t) (uint32_t) i7[1];
        i7 += 2;
        const int32_t vk7x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14] - vkernel_zero_point;
        const int32_t vk7x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[15] - vkernel_zero_point;
        vacc0 += vi7x0 * vk7x0;
        vacc1 += vi7x1 * vk7x1;

        // Skip past the 2 biases and 16 kernel bytes consumed above.
        w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 16 * sizeof(uint8_t));

        b[0] = vacc0;
        b[1] = vacc1;
        b += 2;
      }

      // Remainder: the single leftover channel (when channels is odd).
      if XNN_UNLIKELY(c != 0) {
        int32_t vacc = unaligned_load_s32(w);

        const int32_t vi0 = (int32_t) (uint32_t) *i0;
        const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0] - vkernel_zero_point;
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) (uint32_t) *i1;
        const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1] - vkernel_zero_point;
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) (uint32_t) *i2;
        const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2] - vkernel_zero_point;
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) (uint32_t) *i3;
        const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3] - vkernel_zero_point;
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) (uint32_t) *i4;
        const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4] - vkernel_zero_point;
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) (uint32_t) *i5;
        const int32_t vk5 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[5] - vkernel_zero_point;
        vacc += vi5 * vk5;
        const int32_t vi6 = (int32_t) (uint32_t) *i6;
        const int32_t vk6 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[6] - vkernel_zero_point;
        vacc += vi6 * vk6;
        const int32_t vi7 = (int32_t) (uint32_t) *i7;
        const int32_t vk7 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[7] - vkernel_zero_point;
        vacc += vi7 * vk7;

        w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 8 * sizeof(uint8_t));

        *b++ = vacc;
      }
    }

    // Middle pass to process 8 inputs in each iteration.
    // Runs while more than 9 taps remain, so the last pass always has 2..9.
    for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
      int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      const uint8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
      }
      const uint8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
      }
      const uint8_t* i7 = input[7];
      assert(i7 != NULL);
      if XNN_UNPREDICTABLE(i7 != zero) {
        i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
      }
      input += 8;

      // Accumulate on top of the partial sums from the previous pass;
      // kernel bytes are read directly from w (no biases in middle passes).
      size_t c = channels;
      for (; c >= 2; c -= 2) {
        int32_t vacc0 = b[0];
        int32_t vacc1 = b[1];

        const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
        const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
        i0 += 2;
        const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
        const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
        vacc0 += vi0x0 * vk0x0;
        vacc1 += vi0x1 * vk0x1;
        const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
        const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
        i1 += 2;
        const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
        const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
        vacc0 += vi1x0 * vk1x0;
        vacc1 += vi1x1 * vk1x1;
        const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
        const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
        i2 += 2;
        const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
        const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
        vacc0 += vi2x0 * vk2x0;
        vacc1 += vi2x1 * vk2x1;
        const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
        const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
        i3 += 2;
        const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
        const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[7] - vkernel_zero_point;
        vacc0 += vi3x0 * vk3x0;
        vacc1 += vi3x1 * vk3x1;
        const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
        const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
        i4 += 2;
        const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[8] - vkernel_zero_point;
        const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[9] - vkernel_zero_point;
        vacc0 += vi4x0 * vk4x0;
        vacc1 += vi4x1 * vk4x1;
        const int32_t vi5x0 = (int32_t) (uint32_t) i5[0];
        const int32_t vi5x1 = (int32_t) (uint32_t) i5[1];
        i5 += 2;
        const int32_t vk5x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[10] - vkernel_zero_point;
        const int32_t vk5x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[11] - vkernel_zero_point;
        vacc0 += vi5x0 * vk5x0;
        vacc1 += vi5x1 * vk5x1;
        const int32_t vi6x0 = (int32_t) (uint32_t) i6[0];
        const int32_t vi6x1 = (int32_t) (uint32_t) i6[1];
        i6 += 2;
        const int32_t vk6x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[12] - vkernel_zero_point;
        const int32_t vk6x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[13] - vkernel_zero_point;
        vacc0 += vi6x0 * vk6x0;
        vacc1 += vi6x1 * vk6x1;
        const int32_t vi7x0 = (int32_t) (uint32_t) i7[0];
        const int32_t vi7x1 = (int32_t) (uint32_t) i7[1];
        i7 += 2;
        const int32_t vk7x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[14] - vkernel_zero_point;
        const int32_t vk7x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[15] - vkernel_zero_point;
        vacc0 += vi7x0 * vk7x0;
        vacc1 += vi7x1 * vk7x1;

        w = (const void*) ((uintptr_t) w + 16 * sizeof(uint8_t));

        b[0] = vacc0;
        b[1] = vacc1;
        b += 2;
      }

      // Remainder: the single leftover channel (when channels is odd).
      if XNN_UNLIKELY(c != 0) {
        int32_t vacc = b[0];

        const int32_t vi0 = (int32_t) (uint32_t) *i0;
        const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) (uint32_t) *i1;
        const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) (uint32_t) *i2;
        const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) (uint32_t) *i3;
        const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) (uint32_t) *i4;
        const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) (uint32_t) *i5;
        const int32_t vk5 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
        vacc += vi5 * vk5;
        const int32_t vi6 = (int32_t) (uint32_t) *i6;
        const int32_t vk6 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
        vacc += vi6 * vk6;
        const int32_t vi7 = (int32_t) (uint32_t) *i7;
        const int32_t vk7 = (int32_t) (uint32_t) ((const uint8_t*) w)[7] - vkernel_zero_point;
        vacc += vi7 * vk7;

        w = (const void*) ((uintptr_t) w + 8 * sizeof(uint8_t));

        *b++ = vacc;
      }
    }

    // Last pass to process up to 9 inputs.
    // Reads 9 input rows (i0..i8), finishes the accumulation, and requantizes.
    {
      const int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      const uint8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
      }
      const uint8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
      }
      const uint8_t* i7 = input[7];
      assert(i7 != NULL);
      if XNN_UNPREDICTABLE(i7 != zero) {
        i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
      }
      const uint8_t* i8 = input[8];
      assert(i8 != NULL);
      if XNN_UNPREDICTABLE(i8 != zero) {
        i8 = (const uint8_t*) ((uintptr_t) i8 + input_offset);
      }

      size_t c = channels;
      for (; c >= 2; c -= 2) {
        int32_t vacc0 = b[0];
        int32_t vacc1 = b[1];
        b += 2;

        const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
        const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
        i0 += 2;
        const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
        const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
        vacc0 += vi0x0 * vk0x0;
        vacc1 += vi0x1 * vk0x1;
        const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
        const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
        i1 += 2;
        const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
        const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
        vacc0 += vi1x0 * vk1x0;
        vacc1 += vi1x1 * vk1x1;
        const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
        const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
        i2 += 2;
        const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
        const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
        vacc0 += vi2x0 * vk2x0;
        vacc1 += vi2x1 * vk2x1;
        const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
        const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
        i3 += 2;
        const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
        const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[7] - vkernel_zero_point;
        vacc0 += vi3x0 * vk3x0;
        vacc1 += vi3x1 * vk3x1;
        const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
        const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
        i4 += 2;
        const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[8] - vkernel_zero_point;
        const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[9] - vkernel_zero_point;
        vacc0 += vi4x0 * vk4x0;
        vacc1 += vi4x1 * vk4x1;
        const int32_t vi5x0 = (int32_t) (uint32_t) i5[0];
        const int32_t vi5x1 = (int32_t) (uint32_t) i5[1];
        i5 += 2;
        const int32_t vk5x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[10] - vkernel_zero_point;
        const int32_t vk5x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[11] - vkernel_zero_point;
        vacc0 += vi5x0 * vk5x0;
        vacc1 += vi5x1 * vk5x1;
        const int32_t vi6x0 = (int32_t) (uint32_t) i6[0];
        const int32_t vi6x1 = (int32_t) (uint32_t) i6[1];
        i6 += 2;
        const int32_t vk6x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[12] - vkernel_zero_point;
        const int32_t vk6x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[13] - vkernel_zero_point;
        vacc0 += vi6x0 * vk6x0;
        vacc1 += vi6x1 * vk6x1;
        const int32_t vi7x0 = (int32_t) (uint32_t) i7[0];
        const int32_t vi7x1 = (int32_t) (uint32_t) i7[1];
        i7 += 2;
        const int32_t vk7x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[14] - vkernel_zero_point;
        const int32_t vk7x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[15] - vkernel_zero_point;
        vacc0 += vi7x0 * vk7x0;
        vacc1 += vi7x1 * vk7x1;
        const int32_t vi8x0 = (int32_t) (uint32_t) i8[0];
        const int32_t vi8x1 = (int32_t) (uint32_t) i8[1];
        i8 += 2;
        const int32_t vk8x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[16] - vkernel_zero_point;
        const int32_t vk8x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[17] - vkernel_zero_point;
        vacc0 += vi8x0 * vk8x0;
        vacc1 += vi8x1 * vk8x1;

        w = (const void*) ((uintptr_t) w + 18 * sizeof(uint8_t));

        // Requantize: scale in fp32, add the magic bias, reinterpret the float
        // bits as an integer, clamp, then remove (magic bias - zero point).
        float vfpacc0 = (float) vacc0;
        float vfpacc1 = (float) vacc1;

        vfpacc0 *= vscale;
        vfpacc1 *= vscale;

        vfpacc0 += vmagic_bias;
        vfpacc1 += vmagic_bias;

        int32_t vout0 = (int32_t) float_as_uint32(vfpacc0);
        int32_t vout1 = (int32_t) float_as_uint32(vfpacc1);

        // Integer-domain clamp implements the output min/max bounds.
        vout0 = math_max_s32(vout0, vmagic_min);
        vout1 = math_max_s32(vout1, vmagic_min);

        vout0 = math_min_s32(vout0, vmagic_max);
        vout1 = math_min_s32(vout1, vmagic_max);

        vout0 -= vmagic_bias_less_zero_point;
        vout1 -= vmagic_bias_less_zero_point;

        output[0] = (uint8_t) vout0;
        output[1] = (uint8_t) vout1;
        output += 2;
      }

      // Remainder: the single leftover channel (when channels is odd).
      if XNN_UNLIKELY(c != 0) {
        int32_t vacc = b[0];

        const int32_t vi0 = (int32_t) (uint32_t) *i0;
        const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) (uint32_t) *i1;
        const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) (uint32_t) *i2;
        const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) (uint32_t) *i3;
        const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) (uint32_t) *i4;
        const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) (uint32_t) *i5;
        const int32_t vk5 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
        vacc += vi5 * vk5;
        const int32_t vi6 = (int32_t) (uint32_t) *i6;
        const int32_t vk6 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
        vacc += vi6 * vk6;
        const int32_t vi7 = (int32_t) (uint32_t) *i7;
        const int32_t vk7 = (int32_t) (uint32_t) ((const uint8_t*) w)[7] - vkernel_zero_point;
        vacc += vi7 * vk7;
        const int32_t vi8 = (int32_t) (uint32_t) *i8;
        const int32_t vk8 = (int32_t) (uint32_t) ((const uint8_t*) w)[8] - vkernel_zero_point;
        vacc += vi8 * vk8;

        // Same imagic requantization as the 2-channel loop above.
        float vfpacc = (float) vacc * vscale;
        vfpacc += vmagic_bias;
        int32_t vout = (int32_t) float_as_uint32(vfpacc);
        vout = math_max_s32(vout, vmagic_min);
        vout = math_min_s32(vout, vmagic_max);
        vout -= vmagic_bias_less_zero_point;
        *output++ = (uint8_t) vout;
      }
    }

    // Advance to the next output pixel's row-pointer array and output slot.
    input = (const uint8_t**) ((uintptr_t) input + input_stride);
    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 23,618
| 38.496656
| 133
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-8f8m9l2c1s1r-minmax-fp32-scalar-lrintf.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_dwconv_minmax_fp32_ukernel_8f8m9l2c1s1r__scalar_lrintf(
size_t channels,
size_t output_width,
const uint8_t** input,
const void* weights,
uint8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const uint8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 8);
const float vscale = params->fp32_scalar_lrintf.scale;
const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
const int32_t vkernel_zero_point = params->fp32_scalar_lrintf.kernel_zero_point;
do {
const void* w = weights;
// First pass to process 8 inputs.
{
int32_t* b = buffer;
const uint8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
const uint8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
}
const uint8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
}
const uint8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0] - vkernel_zero_point;
const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1] - vkernel_zero_point;
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2] - vkernel_zero_point;
const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3] - vkernel_zero_point;
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4] - vkernel_zero_point;
const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5] - vkernel_zero_point;
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6] - vkernel_zero_point;
const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7] - vkernel_zero_point;
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8] - vkernel_zero_point;
const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9] - vkernel_zero_point;
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) (uint32_t) i5[0];
const int32_t vi5x1 = (int32_t) (uint32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10] - vkernel_zero_point;
const int32_t vk5x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[11] - vkernel_zero_point;
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
const int32_t vi6x0 = (int32_t) (uint32_t) i6[0];
const int32_t vi6x1 = (int32_t) (uint32_t) i6[1];
i6 += 2;
const int32_t vk6x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12] - vkernel_zero_point;
const int32_t vk6x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[13] - vkernel_zero_point;
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
const int32_t vi7x0 = (int32_t) (uint32_t) i7[0];
const int32_t vi7x1 = (int32_t) (uint32_t) i7[1];
i7 += 2;
const int32_t vk7x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14] - vkernel_zero_point;
const int32_t vk7x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[15] - vkernel_zero_point;
vacc0 += vi7x0 * vk7x0;
vacc1 += vi7x1 * vk7x1;
w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 16 * sizeof(uint8_t));
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) (uint32_t) *i0;
const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0] - vkernel_zero_point;
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) (uint32_t) *i1;
const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1] - vkernel_zero_point;
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) (uint32_t) *i2;
const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2] - vkernel_zero_point;
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) (uint32_t) *i3;
const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3] - vkernel_zero_point;
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) (uint32_t) *i4;
const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4] - vkernel_zero_point;
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) (uint32_t) *i5;
const int32_t vk5 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[5] - vkernel_zero_point;
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) (uint32_t) *i6;
const int32_t vk6 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[6] - vkernel_zero_point;
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) (uint32_t) *i7;
const int32_t vk7 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[7] - vkernel_zero_point;
vacc += vi7 * vk7;
w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 8 * sizeof(uint8_t));
*b++ = vacc;
}
}
// Middle pass to process 8 inputs in each iteration.
for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
int32_t* b = buffer;
const uint8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
const uint8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
}
const uint8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
}
const uint8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[7] - vkernel_zero_point;
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[8] - vkernel_zero_point;
const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[9] - vkernel_zero_point;
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) (uint32_t) i5[0];
const int32_t vi5x1 = (int32_t) (uint32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[10] - vkernel_zero_point;
const int32_t vk5x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[11] - vkernel_zero_point;
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
const int32_t vi6x0 = (int32_t) (uint32_t) i6[0];
const int32_t vi6x1 = (int32_t) (uint32_t) i6[1];
i6 += 2;
const int32_t vk6x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[12] - vkernel_zero_point;
const int32_t vk6x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[13] - vkernel_zero_point;
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
const int32_t vi7x0 = (int32_t) (uint32_t) i7[0];
const int32_t vi7x1 = (int32_t) (uint32_t) i7[1];
i7 += 2;
const int32_t vk7x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[14] - vkernel_zero_point;
const int32_t vk7x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[15] - vkernel_zero_point;
vacc0 += vi7x0 * vk7x0;
vacc1 += vi7x1 * vk7x1;
w = (const void*) ((uintptr_t) w + 16 * sizeof(uint8_t));
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = b[0];
const int32_t vi0 = (int32_t) (uint32_t) *i0;
const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) (uint32_t) *i1;
const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) (uint32_t) *i2;
const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) (uint32_t) *i3;
const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) (uint32_t) *i4;
const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) (uint32_t) *i5;
const int32_t vk5 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) (uint32_t) *i6;
const int32_t vk6 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) (uint32_t) *i7;
const int32_t vk7 = (int32_t) (uint32_t) ((const uint8_t*) w)[7] - vkernel_zero_point;
vacc += vi7 * vk7;
w = (const void*) ((uintptr_t) w + 8 * sizeof(uint8_t));
*b++ = vacc;
}
}
// Last pass to process up to 9 inputs.
{
const int32_t* b = buffer;
const uint8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
const uint8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
}
const uint8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
}
const uint8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
}
const uint8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const uint8_t*) ((uintptr_t) i8 + input_offset);
}
size_t c = channels;
for (; c >= 2; c -= 2) {
int32_t vacc0 = b[0];
int32_t vacc1 = b[1];
b += 2;
const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[7] - vkernel_zero_point;
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[8] - vkernel_zero_point;
const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[9] - vkernel_zero_point;
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) (uint32_t) i5[0];
const int32_t vi5x1 = (int32_t) (uint32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[10] - vkernel_zero_point;
const int32_t vk5x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[11] - vkernel_zero_point;
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
const int32_t vi6x0 = (int32_t) (uint32_t) i6[0];
const int32_t vi6x1 = (int32_t) (uint32_t) i6[1];
i6 += 2;
const int32_t vk6x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[12] - vkernel_zero_point;
const int32_t vk6x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[13] - vkernel_zero_point;
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
const int32_t vi7x0 = (int32_t) (uint32_t) i7[0];
const int32_t vi7x1 = (int32_t) (uint32_t) i7[1];
i7 += 2;
const int32_t vk7x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[14] - vkernel_zero_point;
const int32_t vk7x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[15] - vkernel_zero_point;
vacc0 += vi7x0 * vk7x0;
vacc1 += vi7x1 * vk7x1;
const int32_t vi8x0 = (int32_t) (uint32_t) i8[0];
const int32_t vi8x1 = (int32_t) (uint32_t) i8[1];
i8 += 2;
const int32_t vk8x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[16] - vkernel_zero_point;
const int32_t vk8x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[17] - vkernel_zero_point;
vacc0 += vi8x0 * vk8x0;
vacc1 += vi8x1 * vk8x1;
w = (const void*) ((uintptr_t) w + 18 * sizeof(uint8_t));
float vfpacc0 = (float) vacc0;
float vfpacc1 = (float) vacc1;
vfpacc0 *= vscale;
vfpacc1 *= vscale;
vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
const int32_t vrndacc0 = (int32_t) lrintf(vfpacc0);
const int32_t vrndacc1 = (int32_t) lrintf(vfpacc1);
int32_t vout0 = (int32_t) vrndacc0 + voutput_zero_point;
int32_t vout1 = (int32_t) vrndacc1 + voutput_zero_point;
output[0] = (uint8_t) vout0;
output[1] = (uint8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = b[0];
const int32_t vi0 = (int32_t) (uint32_t) *i0;
const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) (uint32_t) *i1;
const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) (uint32_t) *i2;
const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) (uint32_t) *i3;
const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) (uint32_t) *i4;
const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) (uint32_t) *i5;
const int32_t vk5 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) (uint32_t) *i6;
const int32_t vk6 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) (uint32_t) *i7;
const int32_t vk7 = (int32_t) (uint32_t) ((const uint8_t*) w)[7] - vkernel_zero_point;
vacc += vi7 * vk7;
const int32_t vi8 = (int32_t) (uint32_t) *i8;
const int32_t vk8 = (int32_t) (uint32_t) ((const uint8_t*) w)[8] - vkernel_zero_point;
vacc += vi8 * vk8;
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
const int32_t vrndacc = (int32_t) lrintf(vfpacc);
int32_t vout = vrndacc + voutput_zero_point;
*output++ = (uint8_t) vout;
}
}
input = (const uint8_t**) ((uintptr_t) input + input_stride);
output = (uint8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 23,691
| 38.885522
| 133
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-8f8m9l2c1s1r-minmax-fp32-wasm-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QU8 depthwise-convolution micro-kernel, multipass scalar variant for WebAssembly.
// Naming decodes the tiling: 8f = first pass consumes 8 kernel taps, 8m = each
// middle pass consumes 8 taps, 9l = last pass consumes up to 9 taps,
// 2c1s1r = channels are processed 2 at a time with a 1-channel remainder.
// "fp32-fmagic" = the int32 accumulator is requantized through float using the
// magic-bias bit trick (add a large bias, reinterpret the float bits as int).
//
// channels         - number of channels (depthwise: one weight per tap per channel)
// output_width     - number of output pixels to produce
// input            - array of per-tap input row pointers (indirection buffer)
// weights          - packed weights: per 2-channel group, 2 int32 biases followed
//                    by the per-tap uint8 kernel values (layout evidenced by the
//                    unaligned_indexed_load_s32 / pointer-advance pattern below)
// output           - uint8 output pointer
// input_stride     - bytes to advance the indirection pointer per output pixel
// output_increment - bytes to advance the output pointer per output pixel
// input_offset     - byte offset applied to every non-`zero` input pointer
// zero             - pointer to a zero vector used for padding rows (never offset)
// kernel_size      - total number of kernel taps; must exceed 8 (multipass)
// buffer           - int32 scratch holding per-channel partial accumulators
//                    carried between passes
// params           - requantization parameters (scale, clamps, magic bias, zero points)
void xnn_qu8_dwconv_minmax_fp32_ukernel_8f8m9l2c1s1r__wasm_fmagic(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);
  assert(kernel_size > 8);
  // Hoist all requantization constants out of the pixel loop.
  const float vscale = params->fp32_scalar_fmagic.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
  const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
  const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
  const int32_t vkernel_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
  do {
    // Each output pixel restarts at the beginning of the packed weights.
    const void* w = weights;
    // First pass to process 8 inputs.
    {
      int32_t* b = buffer;
      // Load the 8 input-row pointers for this pass; rows equal to `zero`
      // are padding and must not be offset.
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      const uint8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
      }
      const uint8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
      }
      const uint8_t* i7 = input[7];
      assert(i7 != NULL);
      if XNN_UNPREDICTABLE(i7 != zero) {
        i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
      }
      input += 8;
      size_t c = channels;
      // Main loop: 2 channels per iteration. Accumulators start from the
      // int32 biases stored at the head of each 2-channel weight group.
      for (; c >= 2; c -= 2) {
        int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
        int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
        // For each of the 8 taps: widen input byte, subtract the kernel
        // zero point from the weight byte, multiply-accumulate.
        const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
        const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
        i0 += 2;
        const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0] - vkernel_zero_point;
        const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1] - vkernel_zero_point;
        vacc0 += vi0x0 * vk0x0;
        vacc1 += vi0x1 * vk0x1;
        const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
        const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
        i1 += 2;
        const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2] - vkernel_zero_point;
        const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3] - vkernel_zero_point;
        vacc0 += vi1x0 * vk1x0;
        vacc1 += vi1x1 * vk1x1;
        const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
        const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
        i2 += 2;
        const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4] - vkernel_zero_point;
        const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5] - vkernel_zero_point;
        vacc0 += vi2x0 * vk2x0;
        vacc1 += vi2x1 * vk2x1;
        const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
        const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
        i3 += 2;
        const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6] - vkernel_zero_point;
        const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7] - vkernel_zero_point;
        vacc0 += vi3x0 * vk3x0;
        vacc1 += vi3x1 * vk3x1;
        const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
        const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
        i4 += 2;
        const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8] - vkernel_zero_point;
        const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9] - vkernel_zero_point;
        vacc0 += vi4x0 * vk4x0;
        vacc1 += vi4x1 * vk4x1;
        const int32_t vi5x0 = (int32_t) (uint32_t) i5[0];
        const int32_t vi5x1 = (int32_t) (uint32_t) i5[1];
        i5 += 2;
        const int32_t vk5x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10] - vkernel_zero_point;
        const int32_t vk5x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[11] - vkernel_zero_point;
        vacc0 += vi5x0 * vk5x0;
        vacc1 += vi5x1 * vk5x1;
        const int32_t vi6x0 = (int32_t) (uint32_t) i6[0];
        const int32_t vi6x1 = (int32_t) (uint32_t) i6[1];
        i6 += 2;
        const int32_t vk6x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12] - vkernel_zero_point;
        const int32_t vk6x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[13] - vkernel_zero_point;
        vacc0 += vi6x0 * vk6x0;
        vacc1 += vi6x1 * vk6x1;
        const int32_t vi7x0 = (int32_t) (uint32_t) i7[0];
        const int32_t vi7x1 = (int32_t) (uint32_t) i7[1];
        i7 += 2;
        const int32_t vk7x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14] - vkernel_zero_point;
        const int32_t vk7x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[15] - vkernel_zero_point;
        vacc0 += vi7x0 * vk7x0;
        vacc1 += vi7x1 * vk7x1;
        // Advance past this group's 2 biases + 16 weight bytes; stash the
        // partial sums in the scratch buffer for the next pass.
        w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 16 * sizeof(uint8_t));
        b[0] = vacc0;
        b[1] = vacc1;
        b += 2;
      }
      // Remainder: one trailing channel (c == 1), same tap loop unrolled
      // for a single lane with a 1-bias/8-weight group layout.
      if XNN_UNLIKELY(c != 0) {
        int32_t vacc = unaligned_load_s32(w);
        const int32_t vi0 = (int32_t) (uint32_t) *i0;
        const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[0] - vkernel_zero_point;
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) (uint32_t) *i1;
        const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[1] - vkernel_zero_point;
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) (uint32_t) *i2;
        const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[2] - vkernel_zero_point;
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) (uint32_t) *i3;
        const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[3] - vkernel_zero_point;
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) (uint32_t) *i4;
        const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[4] - vkernel_zero_point;
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) (uint32_t) *i5;
        const int32_t vk5 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[5] - vkernel_zero_point;
        vacc += vi5 * vk5;
        const int32_t vi6 = (int32_t) (uint32_t) *i6;
        const int32_t vk6 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[6] - vkernel_zero_point;
        vacc += vi6 * vk6;
        const int32_t vi7 = (int32_t) (uint32_t) *i7;
        const int32_t vk7 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 1 * sizeof(int32_t)))[7] - vkernel_zero_point;
        vacc += vi7 * vk7;
        w = (const void*) ((uintptr_t) w + 1 * sizeof(int32_t) + 8 * sizeof(uint8_t));
        *b++ = vacc;
      }
    }
    // Middle pass to process 8 inputs in each iteration.
    // Runs while more than 9 taps remain, leaving 2..9 taps for the last pass.
    // Accumulators are read from and written back to the scratch buffer;
    // middle/last weight groups contain only uint8 weights (no biases).
    for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
      int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      const uint8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
      }
      const uint8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
      }
      const uint8_t* i7 = input[7];
      assert(i7 != NULL);
      if XNN_UNPREDICTABLE(i7 != zero) {
        i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
      }
      input += 8;
      size_t c = channels;
      for (; c >= 2; c -= 2) {
        // Resume the partial sums accumulated by the previous pass.
        int32_t vacc0 = b[0];
        int32_t vacc1 = b[1];
        const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
        const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
        i0 += 2;
        const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
        const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
        vacc0 += vi0x0 * vk0x0;
        vacc1 += vi0x1 * vk0x1;
        const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
        const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
        i1 += 2;
        const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
        const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
        vacc0 += vi1x0 * vk1x0;
        vacc1 += vi1x1 * vk1x1;
        const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
        const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
        i2 += 2;
        const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
        const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
        vacc0 += vi2x0 * vk2x0;
        vacc1 += vi2x1 * vk2x1;
        const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
        const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
        i3 += 2;
        const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
        const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[7] - vkernel_zero_point;
        vacc0 += vi3x0 * vk3x0;
        vacc1 += vi3x1 * vk3x1;
        const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
        const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
        i4 += 2;
        const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[8] - vkernel_zero_point;
        const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[9] - vkernel_zero_point;
        vacc0 += vi4x0 * vk4x0;
        vacc1 += vi4x1 * vk4x1;
        const int32_t vi5x0 = (int32_t) (uint32_t) i5[0];
        const int32_t vi5x1 = (int32_t) (uint32_t) i5[1];
        i5 += 2;
        const int32_t vk5x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[10] - vkernel_zero_point;
        const int32_t vk5x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[11] - vkernel_zero_point;
        vacc0 += vi5x0 * vk5x0;
        vacc1 += vi5x1 * vk5x1;
        const int32_t vi6x0 = (int32_t) (uint32_t) i6[0];
        const int32_t vi6x1 = (int32_t) (uint32_t) i6[1];
        i6 += 2;
        const int32_t vk6x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[12] - vkernel_zero_point;
        const int32_t vk6x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[13] - vkernel_zero_point;
        vacc0 += vi6x0 * vk6x0;
        vacc1 += vi6x1 * vk6x1;
        const int32_t vi7x0 = (int32_t) (uint32_t) i7[0];
        const int32_t vi7x1 = (int32_t) (uint32_t) i7[1];
        i7 += 2;
        const int32_t vk7x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[14] - vkernel_zero_point;
        const int32_t vk7x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[15] - vkernel_zero_point;
        vacc0 += vi7x0 * vk7x0;
        vacc1 += vi7x1 * vk7x1;
        w = (const void*) ((uintptr_t) w + 16 * sizeof(uint8_t));
        b[0] = vacc0;
        b[1] = vacc1;
        b += 2;
      }
      // Remainder channel for the middle pass.
      if XNN_UNLIKELY(c != 0) {
        int32_t vacc = b[0];
        const int32_t vi0 = (int32_t) (uint32_t) *i0;
        const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) (uint32_t) *i1;
        const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) (uint32_t) *i2;
        const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) (uint32_t) *i3;
        const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) (uint32_t) *i4;
        const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) (uint32_t) *i5;
        const int32_t vk5 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
        vacc += vi5 * vk5;
        const int32_t vi6 = (int32_t) (uint32_t) *i6;
        const int32_t vk6 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
        vacc += vi6 * vk6;
        const int32_t vi7 = (int32_t) (uint32_t) *i7;
        const int32_t vk7 = (int32_t) (uint32_t) ((const uint8_t*) w)[7] - vkernel_zero_point;
        vacc += vi7 * vk7;
        w = (const void*) ((uintptr_t) w + 8 * sizeof(uint8_t));
        *b++ = vacc;
      }
    }
    // Last pass to process up to 9 inputs.
    // Finishes the accumulation (9 taps here) and requantizes to uint8.
    {
      const int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      const uint8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
      }
      const uint8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
      }
      const uint8_t* i7 = input[7];
      assert(i7 != NULL);
      if XNN_UNPREDICTABLE(i7 != zero) {
        i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
      }
      const uint8_t* i8 = input[8];
      assert(i8 != NULL);
      if XNN_UNPREDICTABLE(i8 != zero) {
        i8 = (const uint8_t*) ((uintptr_t) i8 + input_offset);
      }
      size_t c = channels;
      for (; c >= 2; c -= 2) {
        int32_t vacc0 = b[0];
        int32_t vacc1 = b[1];
        b += 2;
        const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
        const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
        i0 += 2;
        const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
        const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
        vacc0 += vi0x0 * vk0x0;
        vacc1 += vi0x1 * vk0x1;
        const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
        const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
        i1 += 2;
        const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
        const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
        vacc0 += vi1x0 * vk1x0;
        vacc1 += vi1x1 * vk1x1;
        const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
        const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
        i2 += 2;
        const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
        const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
        vacc0 += vi2x0 * vk2x0;
        vacc1 += vi2x1 * vk2x1;
        const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
        const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
        i3 += 2;
        const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
        const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[7] - vkernel_zero_point;
        vacc0 += vi3x0 * vk3x0;
        vacc1 += vi3x1 * vk3x1;
        const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
        const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
        i4 += 2;
        const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[8] - vkernel_zero_point;
        const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[9] - vkernel_zero_point;
        vacc0 += vi4x0 * vk4x0;
        vacc1 += vi4x1 * vk4x1;
        const int32_t vi5x0 = (int32_t) (uint32_t) i5[0];
        const int32_t vi5x1 = (int32_t) (uint32_t) i5[1];
        i5 += 2;
        const int32_t vk5x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[10] - vkernel_zero_point;
        const int32_t vk5x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[11] - vkernel_zero_point;
        vacc0 += vi5x0 * vk5x0;
        vacc1 += vi5x1 * vk5x1;
        const int32_t vi6x0 = (int32_t) (uint32_t) i6[0];
        const int32_t vi6x1 = (int32_t) (uint32_t) i6[1];
        i6 += 2;
        const int32_t vk6x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[12] - vkernel_zero_point;
        const int32_t vk6x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[13] - vkernel_zero_point;
        vacc0 += vi6x0 * vk6x0;
        vacc1 += vi6x1 * vk6x1;
        const int32_t vi7x0 = (int32_t) (uint32_t) i7[0];
        const int32_t vi7x1 = (int32_t) (uint32_t) i7[1];
        i7 += 2;
        const int32_t vk7x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[14] - vkernel_zero_point;
        const int32_t vk7x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[15] - vkernel_zero_point;
        vacc0 += vi7x0 * vk7x0;
        vacc1 += vi7x1 * vk7x1;
        const int32_t vi8x0 = (int32_t) (uint32_t) i8[0];
        const int32_t vi8x1 = (int32_t) (uint32_t) i8[1];
        i8 += 2;
        const int32_t vk8x0 = (int32_t) (uint32_t) ((const uint8_t*) w)[16] - vkernel_zero_point;
        const int32_t vk8x1 = (int32_t) (uint32_t) ((const uint8_t*) w)[17] - vkernel_zero_point;
        vacc0 += vi8x0 * vk8x0;
        vacc1 += vi8x1 * vk8x1;
        w = (const void*) ((uintptr_t) w + 18 * sizeof(uint8_t));
        // Requantize: scale in fp32, clamp (in the zero-point-shifted domain),
        // then convert float->int via the magic-bias trick: adding vmagic_bias
        // places the integer value in the low mantissa bits, so reinterpreting
        // the bits and subtracting the pre-folded constant yields the final
        // uint8 value including the output zero point.
        float vfpacc0 = (float) vacc0;
        float vfpacc1 = (float) vacc1;
        vfpacc0 *= vscale;
        vfpacc1 *= vscale;
        vfpacc0 = __builtin_wasm_max_f32(vfpacc0, voutput_min_less_zero_point);
        vfpacc1 = __builtin_wasm_max_f32(vfpacc1, voutput_min_less_zero_point);
        vfpacc0 = __builtin_wasm_min_f32(vfpacc0, voutput_max_less_zero_point);
        vfpacc1 = __builtin_wasm_min_f32(vfpacc1, voutput_max_less_zero_point);
        vfpacc0 += vmagic_bias;
        vfpacc1 += vmagic_bias;
        int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
        int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
        output[0] = (uint8_t) vout0;
        output[1] = (uint8_t) vout1;
        output += 2;
      }
      // Remainder channel: same 9-tap accumulation + requantization for one lane.
      if XNN_UNLIKELY(c != 0) {
        int32_t vacc = b[0];
        const int32_t vi0 = (int32_t) (uint32_t) *i0;
        const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vkernel_zero_point;
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) (uint32_t) *i1;
        const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vkernel_zero_point;
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) (uint32_t) *i2;
        const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vkernel_zero_point;
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) (uint32_t) *i3;
        const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vkernel_zero_point;
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) (uint32_t) *i4;
        const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) w)[4] - vkernel_zero_point;
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) (uint32_t) *i5;
        const int32_t vk5 = (int32_t) (uint32_t) ((const uint8_t*) w)[5] - vkernel_zero_point;
        vacc += vi5 * vk5;
        const int32_t vi6 = (int32_t) (uint32_t) *i6;
        const int32_t vk6 = (int32_t) (uint32_t) ((const uint8_t*) w)[6] - vkernel_zero_point;
        vacc += vi6 * vk6;
        const int32_t vi7 = (int32_t) (uint32_t) *i7;
        const int32_t vk7 = (int32_t) (uint32_t) ((const uint8_t*) w)[7] - vkernel_zero_point;
        vacc += vi7 * vk7;
        const int32_t vi8 = (int32_t) (uint32_t) *i8;
        const int32_t vk8 = (int32_t) (uint32_t) ((const uint8_t*) w)[8] - vkernel_zero_point;
        vacc += vi8 * vk8;
        float vfpacc = (float) vacc * vscale;
        vfpacc = __builtin_wasm_max_f32(vfpacc, voutput_min_less_zero_point);
        vfpacc = __builtin_wasm_min_f32(vfpacc, voutput_max_less_zero_point);
        vfpacc += vmagic_bias;
        int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
        *output++ = (uint8_t) vout;
      }
    }
    // Advance to the next output pixel's indirection entries and output slot.
    input = (const uint8_t**) ((uintptr_t) input + input_stride);
    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 23,853
| 39.158249
| 133
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-8f8m9l8c8s8r-minmax-fp32-neon-mul16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-neon-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
// QU8 depthwise convolution microkernel (NEON MUL16, fp32 requantization with
// the magic-bias rounding trick) for kernel_size > 8 taps.
//
// The kernel taps are consumed in multiple passes over an int32 scratch buffer:
//   - First pass:   8 taps; accumulators start from the per-channel bias stored
//                   at the head of `weights`, results spilled to `buffer`.
//   - Middle pass:  8 taps per iteration; accumulators reloaded from and
//                   re-spilled to `buffer` (runs while more than 9 taps remain).
//   - Last pass:    up to 9 remaining taps; accumulators reloaded, requantized
//                   (scale, magic bias, saturating narrow, clamp) and stored as
//                   uint8 output.
// Channels are processed 8 at a time; weights are unsigned bytes with a
// kernel zero point subtracted before the widening multiply-accumulate.
void xnn_qu8_dwconv_minmax_fp32_ukernel_8f8m9l8c8s8r__neon_mul16(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);
  assert(kernel_size > 8);  // kernels of <= 8 taps are handled by single-pass microkernels

  // Broadcast requantization parameters once per call.
  const uint8x8_t vkernel_zero_point = vld1_dup_u8(params->fp32_neon.kernel_zero_point);
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
  const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
  const uint8x8_t voutput_min = vld1_dup_u8(&params->fp32_neon.output_min);
  const uint8x8_t voutput_max = vld1_dup_u8(&params->fp32_neon.output_max);
  do {
    const void* w = weights;

    // First pass to process 8 inputs.
    {
      int32_t* b = buffer;
      // Rows pointing at the shared `zero` buffer must not be offset.
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      const uint8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
      }
      const uint8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
      }
      const uint8_t* i7 = input[7];
      assert(i7 != NULL);
      if XNN_UNPREDICTABLE(i7 != zero) {
        i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
      }
      input += 8;

      // Channels are padded to a multiple of 8 in the packed weights, so the
      // scratch buffer is always filled in full groups of 8.
      size_t c = round_up_po2(channels, 8);
      for (; c >= 8; c -= 8) {
        // Accumulators start from the packed per-channel bias.
        int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
        int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;

        const int16x8_t vi0x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i0))); i0 += 8;
        const int16x8_t vk0x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));

        const int16x8_t vi1x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i1))); i1 += 8;
        const int16x8_t vk1x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));

        const int16x8_t vi2x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i2))); i2 += 8;
        const int16x8_t vk2x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));

        const int16x8_t vi3x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i3))); i3 += 8;
        const int16x8_t vk3x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));

        const int16x8_t vi4x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i4))); i4 += 8;
        const int16x8_t vk4x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));

        const int16x8_t vi5x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i5))); i5 += 8;
        const int16x8_t vk5x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));

        const int16x8_t vi6x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i6))); i6 += 8;
        const int16x8_t vk6x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));

        const int16x8_t vi7x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i7))); i7 += 8;
        const int16x8_t vk7x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));

        // Spill partial sums to the scratch buffer for the next pass.
        vst1q_s32(b, vacc0123); b += 4;
        vst1q_s32(b, vacc4567); b += 4;
      }
      assert(c == 0);
    }

    // Middle pass to process 8 inputs in each iteration.
    for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
      int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      const uint8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
      }
      const uint8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
      }
      const uint8_t* i7 = input[7];
      assert(i7 != NULL);
      if XNN_UNPREDICTABLE(i7 != zero) {
        i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
      }
      input += 8;

      size_t c = round_up_po2(channels, 8);
      for (; c >= 8; c -= 8) {
        // Resume accumulation from the partial sums of the previous pass.
        int32x4_t vacc0123 = vld1q_s32(b);
        int32x4_t vacc4567 = vld1q_s32(b + 4);

        const int16x8_t vi0x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i0))); i0 += 8;
        const int16x8_t vk0x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));

        const int16x8_t vi1x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i1))); i1 += 8;
        const int16x8_t vk1x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));

        const int16x8_t vi2x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i2))); i2 += 8;
        const int16x8_t vk2x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));

        const int16x8_t vi3x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i3))); i3 += 8;
        const int16x8_t vk3x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));

        const int16x8_t vi4x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i4))); i4 += 8;
        const int16x8_t vk4x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));

        const int16x8_t vi5x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i5))); i5 += 8;
        const int16x8_t vk5x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));

        const int16x8_t vi6x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i6))); i6 += 8;
        const int16x8_t vk6x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));

        const int16x8_t vi7x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i7))); i7 += 8;
        const int16x8_t vk7x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));

        vst1q_s32(b, vacc0123); b += 4;
        vst1q_s32(b, vacc4567); b += 4;
      }
      assert(c == 0);
    }

    // Last pass to process up to 9 inputs.
    {
      const int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      const uint8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
      }
      const uint8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
      }
      const uint8_t* i7 = input[7];
      assert(i7 != NULL);
      if XNN_UNPREDICTABLE(i7 != zero) {
        i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
      }
      const uint8_t* i8 = input[8];
      assert(i8 != NULL);
      if XNN_UNPREDICTABLE(i8 != zero) {
        i8 = (const uint8_t*) ((uintptr_t) i8 + input_offset);
      }

      size_t c = channels;
      for (; c >= 8; c -= 8) {
        int32x4_t vacc0123 = vld1q_s32(b); b += 4;
        int32x4_t vacc4567 = vld1q_s32(b); b += 4;

        const int16x8_t vi0x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i0))); i0 += 8;
        const int16x8_t vk0x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));

        const int16x8_t vi1x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i1))); i1 += 8;
        const int16x8_t vk1x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));

        const int16x8_t vi2x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i2))); i2 += 8;
        const int16x8_t vk2x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));

        const int16x8_t vi3x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i3))); i3 += 8;
        const int16x8_t vk3x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));

        const int16x8_t vi4x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i4))); i4 += 8;
        const int16x8_t vk4x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));

        const int16x8_t vi5x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i5))); i5 += 8;
        const int16x8_t vk5x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));

        const int16x8_t vi6x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i6))); i6 += 8;
        const int16x8_t vk6x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));

        const int16x8_t vi7x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i7))); i7 += 8;
        const int16x8_t vk7x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));

        const int16x8_t vi8x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i8))); i8 += 8;
        const int16x8_t vk8x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi8x01234567), vget_low_s16(vk8x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi8x01234567), vget_high_s16(vk8x01234567));

        // Requantize: scale in fp32, then round-to-nearest via the magic-bias
        // trick (add magic bias, reinterpret as int32, subtract
        // magic_bias - output_zero_point with saturation).
        float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
        float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);

        vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
        vfpacc4567 = vmulq_f32(vfpacc4567, vscale);

        vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
        vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));

        vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
        vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);

#if XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);

        uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
#else  // !XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));

        uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
#endif  // !XNN_ARCH_ARM64

        // Clamp to the user-specified output range.
        vout01234567 = vmax_u8(vout01234567, voutput_min);
        vout01234567 = vmin_u8(vout01234567, voutput_max);

        vst1_u8(output, vout01234567); output += 8;
      }

      // Handle the final 1-7 channels (loads may read past the valid range;
      // the microkernel is declared XNN_OOB_READS).
      if XNN_UNLIKELY(c != 0) {
        {
          int32x4_t vacc0123 = vld1q_s32(b); b += 4;
          int32x4_t vacc4567 = vld1q_s32(b); b += 4;

          const int16x8_t vi0x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i0)));
          const int16x8_t vk0x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;

          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));

          const int16x8_t vi1x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i1)));
          const int16x8_t vk1x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;

          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));

          const int16x8_t vi2x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i2)));
          const int16x8_t vk2x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;

          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));

          const int16x8_t vi3x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i3)));
          const int16x8_t vk3x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;

          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));

          const int16x8_t vi4x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i4)));
          const int16x8_t vk4x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;

          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));

          const int16x8_t vi5x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i5)));
          const int16x8_t vk5x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;

          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));

          const int16x8_t vi6x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i6)));
          const int16x8_t vk6x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;

          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));

          const int16x8_t vi7x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i7)));
          const int16x8_t vk7x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;

          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));

          const int16x8_t vi8x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i8)));
          const int16x8_t vk8x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;

          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi8x01234567), vget_low_s16(vk8x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi8x01234567), vget_high_s16(vk8x01234567));

          float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
          float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);

          vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
          vfpacc4567 = vmulq_f32(vfpacc4567, vscale);

          vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
          vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));

          vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
          vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);

#if XNN_ARCH_ARM64
          int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
          int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
          uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);

          vout01234567 = vmax_u8(vout01234567, voutput_min);
          vout01234567 = vmin_u8(vout01234567, voutput_max);

          // Store 4/2/1 lanes depending on the remaining channel count.
          if (c & 4) {
            vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
            vout01234567 = vext_u8(vout01234567, vout01234567, 4);
          }
          if (c & 2) {
            vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
            vout01234567 = vext_u8(vout01234567, vout01234567, 2);
          }
          if (c & 1) {
            vst1_lane_u8(output, vout01234567, 0); output += 1;
          }
        }
      }
    }

    input = (const uint8_t**) ((uintptr_t) input + input_stride);
    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 24,126
| 47.254
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-8f8m9l8c8s8r-minmax-fp32-neonv8-mul16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-neon-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
void xnn_qu8_dwconv_minmax_fp32_ukernel_8f8m9l8c8s8r__neonv8_mul16(
size_t channels,
size_t output_width,
const uint8_t** input,
const void* weights,
uint8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const uint8_t* zero,
size_t kernel_size,
int32_t* buffer,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
assert(kernel_size > 8);
const uint8x8_t vkernel_zero_point = vld1_dup_u8(params->fp32_neonv8.kernel_zero_point);
const float32x4_t vscale = vld1q_dup_f32(¶ms->fp32_neonv8.scale);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->fp32_neonv8.output_zero_point);
const uint8x8_t voutput_min = vld1_dup_u8(¶ms->fp32_neonv8.output_min);
const uint8x8_t voutput_max = vld1_dup_u8(¶ms->fp32_neonv8.output_max);
do {
const void* w = weights;
// First pass to process 8 inputs.
{
int32_t* b = buffer;
const uint8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
const uint8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
}
const uint8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
}
const uint8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
const int16x8_t vi0x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i0))); i0 += 8;
const int16x8_t vk0x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
const int16x8_t vi1x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i1))); i1 += 8;
const int16x8_t vk1x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
const int16x8_t vi2x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i2))); i2 += 8;
const int16x8_t vk2x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
const int16x8_t vi3x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i3))); i3 += 8;
const int16x8_t vk3x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
const int16x8_t vi4x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i4))); i4 += 8;
const int16x8_t vk4x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
const int16x8_t vi5x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i5))); i5 += 8;
const int16x8_t vk5x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
const int16x8_t vi6x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i6))); i6 += 8;
const int16x8_t vk6x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
const int16x8_t vi7x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i7))); i7 += 8;
const int16x8_t vk7x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
assert(c == 0);
}
// Middle pass to process 8 inputs in each iteration.
for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
int32_t* b = buffer;
const uint8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
const uint8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
}
const uint8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
}
const uint8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
}
input += 8;
size_t c = round_up_po2(channels, 8);
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
const int16x8_t vi0x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i0))); i0 += 8;
const int16x8_t vk0x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
const int16x8_t vi1x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i1))); i1 += 8;
const int16x8_t vk1x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
const int16x8_t vi2x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i2))); i2 += 8;
const int16x8_t vk2x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
const int16x8_t vi3x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i3))); i3 += 8;
const int16x8_t vk3x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
const int16x8_t vi4x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i4))); i4 += 8;
const int16x8_t vk4x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
const int16x8_t vi5x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i5))); i5 += 8;
const int16x8_t vk5x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
const int16x8_t vi6x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i6))); i6 += 8;
const int16x8_t vk6x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
const int16x8_t vi7x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i7))); i7 += 8;
const int16x8_t vk7x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
assert(c == 0);
}
// Last pass to process up to 9 inputs.
{
const int32_t* b = buffer;
const uint8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
const uint8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
}
const uint8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
}
const uint8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
}
const uint8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const uint8_t*) ((uintptr_t) i8 + input_offset);
}
size_t c = channels;
for (; c >= 8; c -= 8) {
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int16x8_t vi0x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i0))); i0 += 8;
const int16x8_t vk0x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
const int16x8_t vi1x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i1))); i1 += 8;
const int16x8_t vk1x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
const int16x8_t vi2x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i2))); i2 += 8;
const int16x8_t vk2x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
const int16x8_t vi3x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i3))); i3 += 8;
const int16x8_t vk3x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
const int16x8_t vi4x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i4))); i4 += 8;
const int16x8_t vk4x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
const int16x8_t vi5x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i5))); i5 += 8;
const int16x8_t vk5x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
const int16x8_t vi6x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i6))); i6 += 8;
const int16x8_t vk6x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
const int16x8_t vi7x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i7))); i7 += 8;
const int16x8_t vk7x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));
const int16x8_t vi8x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i8))); i8 += 8;
const int16x8_t vk8x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi8x01234567), vget_low_s16(vk8x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi8x01234567), vget_high_s16(vk8x01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
#endif // !XNN_ARCH_ARM64
vout01234567 = vmax_u8(vout01234567, voutput_min);
vout01234567 = vmin_u8(vout01234567, voutput_max);
vst1_u8(output, vout01234567); output += 8;
}
if XNN_UNLIKELY(c != 0) {
{
int32x4_t vacc0123 = vld1q_s32(b); b += 4;
int32x4_t vacc4567 = vld1q_s32(b); b += 4;
const int16x8_t vi0x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i0)));
const int16x8_t vk0x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
const int16x8_t vi1x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i1)));
const int16x8_t vk1x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
const int16x8_t vi2x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i2)));
const int16x8_t vk2x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
const int16x8_t vi3x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i3)));
const int16x8_t vk3x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
const int16x8_t vi4x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i4)));
const int16x8_t vk4x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
const int16x8_t vi5x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i5)));
const int16x8_t vk5x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
const int16x8_t vi6x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i6)));
const int16x8_t vk6x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
const int16x8_t vi7x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i7)));
const int16x8_t vk7x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));
const int16x8_t vi8x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i8)));
const int16x8_t vk8x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const int8_t*) w + 8;
vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi8x01234567), vget_low_s16(vk8x01234567));
vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi8x01234567), vget_high_s16(vk8x01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
vout01234567 = vmax_u8(vout01234567, voutput_min);
vout01234567 = vmin_u8(vout01234567, voutput_max);
if (c & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
vout01234567 = vext_u8(vout01234567, vout01234567, 4);
}
if (c & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
vout01234567 = vext_u8(vout01234567, vout01234567, 2);
}
if (c & 1) {
vst1_lane_u8(output, vout01234567, 0); output += 1;
}
}
}
}
input = (const uint8_t**) ((uintptr_t) input + input_stride);
output = (uint8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 23,838
| 46.965795
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-8f8m9l8c8s8r-minmax-rndnu-neon-mul16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/multipass-neon-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
// QU8 depthwise-convolution microkernel with RNDNU requantization, using
// NEON 16-bit multiplies (mul16).  Multipass layout "8f8m9l8c8s8r":
//   - a first pass over the first 8 kernel taps,
//   - zero or more middle passes over 8 taps each,
//   - a last pass over the remaining (up to 9) taps,
// processing 8 channels per iteration.  Partial 32-bit accumulators are
// carried between passes through the caller-provided `buffer`.
void xnn_qu8_dwconv_minmax_rndnu_ukernel_8f8m9l8c8s8r__neon_mul16(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    size_t kernel_size,
    int32_t* buffer,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);
  assert(kernel_size > 8);  // multipass variant: kernels with <= 8 taps use a unipass microkernel
  // Quantization parameters: kernel zero point (subtracted from every weight
  // byte) and the RNDNU requantization triple (pre-shift, fixed-point
  // multiplier, post-shift), plus output zero point and clamping bounds.
  const uint8x8_t vkernel_zero_point = vld1_dup_u8(params->rndnu_neon.kernel_zero_point);
  const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
  const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
  const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
  const uint8x8_t voutput_min = vld1_dup_u8(&params->rndnu_neon.output_min);
  const uint8x8_t voutput_max = vld1_dup_u8(&params->rndnu_neon.output_max);
  do {
    const void* w = weights;
    // First pass to process 8 inputs.
    {
      int32_t* b = buffer;
      // Each row pointer equal to `zero` is the shared zero-padding row and
      // must not be offset; real rows are shifted by input_offset.
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      const uint8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
      }
      const uint8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
      }
      const uint8_t* i7 = input[7];
      assert(i7 != NULL);
      if XNN_UNPREDICTABLE(i7 != zero) {
        i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
      }
      input += 8;  // advance to the next group of 8 input-row pointers
      // Round channels up to the tile size: the trailing partial tile is
      // processed as a full 8-wide tile here (buffer is sized for this; the
      // kernel is declared XNN_OOB_READS).
      size_t c = round_up_po2(channels, 8);
      for (; c >= 8; c -= 8) {
        // Accumulators start from the per-channel bias stored at the front
        // of the packed weights.
        int32x4_t vacc0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
        int32x4_t vacc4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
        // Per tap: widen 8 input bytes to s16, subtract the kernel zero
        // point from 8 weight bytes (result fits in s16), then widening
        // multiply-accumulate into the two s32 accumulators.
        const int16x8_t vi0x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i0))); i0 += 8;
        const int16x8_t vk0x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
        const int16x8_t vi1x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i1))); i1 += 8;
        const int16x8_t vk1x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
        const int16x8_t vi2x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i2))); i2 += 8;
        const int16x8_t vk2x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
        const int16x8_t vi3x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i3))); i3 += 8;
        const int16x8_t vk3x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
        const int16x8_t vi4x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i4))); i4 += 8;
        const int16x8_t vk4x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
        const int16x8_t vi5x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i5))); i5 += 8;
        const int16x8_t vk5x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
        const int16x8_t vi6x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i6))); i6 += 8;
        const int16x8_t vk6x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
        const int16x8_t vi7x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i7))); i7 += 8;
        const int16x8_t vk7x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));
        // Spill partial accumulators for this channel tile to the buffer.
        vst1q_s32(b, vacc0123); b += 4;
        vst1q_s32(b, vacc4567); b += 4;
      }
      assert(c == 0);
    }
    // Middle pass to process 8 inputs in each iteration.
    // Runs while more than 9 taps remain, leaving <= 9 for the last pass.
    for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) {
      int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      const uint8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
      }
      const uint8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
      }
      const uint8_t* i7 = input[7];
      assert(i7 != NULL);
      if XNN_UNPREDICTABLE(i7 != zero) {
        i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
      }
      input += 8;
      size_t c = round_up_po2(channels, 8);
      for (; c >= 8; c -= 8) {
        // Resume accumulation from the buffered partial sums (read in place,
        // stored back via `b` at the bottom of the loop).
        int32x4_t vacc0123 = vld1q_s32(b);
        int32x4_t vacc4567 = vld1q_s32(b + 4);
        const int16x8_t vi0x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i0))); i0 += 8;
        const int16x8_t vk0x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
        const int16x8_t vi1x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i1))); i1 += 8;
        const int16x8_t vk1x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
        const int16x8_t vi2x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i2))); i2 += 8;
        const int16x8_t vk2x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
        const int16x8_t vi3x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i3))); i3 += 8;
        const int16x8_t vk3x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
        const int16x8_t vi4x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i4))); i4 += 8;
        const int16x8_t vk4x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
        const int16x8_t vi5x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i5))); i5 += 8;
        const int16x8_t vk5x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
        const int16x8_t vi6x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i6))); i6 += 8;
        const int16x8_t vk6x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
        const int16x8_t vi7x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i7))); i7 += 8;
        const int16x8_t vk7x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));
        vst1q_s32(b, vacc0123); b += 4;
        vst1q_s32(b, vacc4567); b += 4;
      }
      assert(c == 0);
    }
    // Last pass to process up to 9 inputs.
    // Reads buffered accumulators, adds the final taps, requantizes, and
    // writes the uint8 output row.
    {
      const int32_t* b = buffer;
      const uint8_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint8_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint8_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint8_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint8_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
      }
      const uint8_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
      }
      const uint8_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
      }
      const uint8_t* i7 = input[7];
      assert(i7 != NULL);
      if XNN_UNPREDICTABLE(i7 != zero) {
        i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
      }
      const uint8_t* i8 = input[8];
      assert(i8 != NULL);
      if XNN_UNPREDICTABLE(i8 != zero) {
        i8 = (const uint8_t*) ((uintptr_t) i8 + input_offset);
      }
      // Main loop handles full 8-channel tiles; the remainder (c < 8) is
      // handled below with partial stores.
      size_t c = channels;
      for (; c >= 8; c -= 8) {
        int32x4_t vacc0123 = vld1q_s32(b); b += 4;
        int32x4_t vacc4567 = vld1q_s32(b); b += 4;
        const int16x8_t vi0x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i0))); i0 += 8;
        const int16x8_t vk0x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
        const int16x8_t vi1x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i1))); i1 += 8;
        const int16x8_t vk1x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
        const int16x8_t vi2x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i2))); i2 += 8;
        const int16x8_t vk2x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
        const int16x8_t vi3x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i3))); i3 += 8;
        const int16x8_t vk3x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
        const int16x8_t vi4x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i4))); i4 += 8;
        const int16x8_t vk4x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
        const int16x8_t vi5x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i5))); i5 += 8;
        const int16x8_t vk5x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
        const int16x8_t vi6x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i6))); i6 += 8;
        const int16x8_t vk6x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
        const int16x8_t vi7x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i7))); i7 += 8;
        const int16x8_t vk7x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));
        const int16x8_t vi8x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i8))); i8 += 8;
        const int16x8_t vk8x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const uint8_t*) w + 8;
        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi8x01234567), vget_low_s16(vk8x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi8x01234567), vget_high_s16(vk8x01234567));
        // RNDNU requantization: saturating shift by the pre-shift amount,
        // saturating-doubling multiply-high by the fixed-point multiplier,
        // then rounding shift by the post-shift amount.
        vacc0123 = vqshlq_s32(vacc0123, vright_pre_shift);
        vacc4567 = vqshlq_s32(vacc4567, vright_pre_shift);
        vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
        vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
        vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
        vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);
// Narrow s32 -> s16 with saturation, add the output zero point, then narrow
// s16 -> u8 with unsigned saturation.  AArch64 uses the fused
// narrow-into-high form; AArch32 combines the two halves explicitly.
#if XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
        vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point)
        uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
#else  // !XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
        vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
        uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
#endif  // !XNN_ARCH_ARM64
        // Clamp to the requested output range and store 8 output bytes.
        vout01234567 = vmax_u8(vout01234567, voutput_min);
        vout01234567 = vmin_u8(vout01234567, voutput_max);
        vst1_u8(output, vout01234567); output += 8;
      }
      if XNN_UNLIKELY(c != 0) {
        {
          // Remainder tile (1-7 channels): compute a full 8-wide tile (input
          // pointers are not advanced; OOB lanes are discarded by the
          // partial stores below).
          int32x4_t vacc0123 = vld1q_s32(b); b += 4;
          int32x4_t vacc4567 = vld1q_s32(b); b += 4;
          // NOTE(review): these tap blocks cast `w` to `const int8_t*` while
          // the rest of this uint8 kernel uses `const uint8_t*`.  Both are
          // 1-byte types used only for pointer arithmetic, so behavior is
          // identical, but the uint8_t form would be consistent - confirm
          // against the generator template before changing.
          const int16x8_t vi0x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i0)));
          const int16x8_t vk0x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const int8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
          const int16x8_t vi1x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i1)));
          const int16x8_t vk1x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const int8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
          const int16x8_t vi2x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i2)));
          const int16x8_t vk2x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const int8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
          const int16x8_t vi3x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i3)));
          const int16x8_t vk3x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const int8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
          const int16x8_t vi4x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i4)));
          const int16x8_t vk4x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const int8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
          const int16x8_t vi5x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i5)));
          const int16x8_t vk5x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const int8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
          const int16x8_t vi6x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i6)));
          const int16x8_t vk6x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const int8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
          const int16x8_t vi7x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i7)));
          const int16x8_t vk7x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const int8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));
          const int16x8_t vi8x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i8)));
          const int16x8_t vk8x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point)); w = (const int8_t*) w + 8;
          vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi8x01234567), vget_low_s16(vk8x01234567));
          vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi8x01234567), vget_high_s16(vk8x01234567));
          // Same RNDNU requantization sequence as the main loop.
          vacc0123 = vqshlq_s32(vacc0123, vright_pre_shift);
          vacc4567 = vqshlq_s32(vacc4567, vright_pre_shift);
          vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
          vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
          vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
          vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);
#if XNN_ARCH_ARM64
          int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
          int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
          vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
          uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
          vout01234567 = vmax_u8(vout01234567, voutput_min);
          vout01234567 = vmin_u8(vout01234567, voutput_max);
          // Store the low 4/2/1 lanes as needed, rotating the consumed bytes
          // out of the vector between stores.
          if (c & 4) {
            vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
            vout01234567 = vext_u8(vout01234567, vout01234567, 4);
          }
          if (c & 2) {
            vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
            vout01234567 = vext_u8(vout01234567, vout01234567, 2);
          }
          if (c & 1) {
            vst1_lane_u8(output, vout01234567, 0); output += 1;
          }
        }
      }
    }
    // Advance to the next output pixel's row-pointer group and output slot.
    input = (const uint8_t**) ((uintptr_t) input + input_stride);
    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 24,051
| 47.297189
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-9p1c-minmax-fp32-scalar-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QU8 depthwise convolution, 9 taps, 1 channel per inner iteration, unipass.
// Accumulates in int32 and requantizes in fp32 using the "fmagic" rounding
// trick: clamp in float space, add a magic bias, and read the rounded result
// out of the low integer bits of the float representation.
void xnn_qu8_dwconv_minmax_fp32_ukernel_9p1c__scalar_fmagic(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);
  // Requantization constants precomputed by the operator setup code.
  const float vscale = params->fp32_scalar_fmagic.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
  const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
  const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
  const int32_t vkernel_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
  do {
    // Fetch the 9 input row pointers for this output pixel; rows that alias
    // the shared zero (padding) buffer are not rebased by input_offset.
    const uint8_t* i[9];
    for (size_t k = 0; k < 9; k++) {
      i[k] = input[k];
      assert(i[k] != NULL);
      if XNN_UNPREDICTABLE(i[k] != zero) {
        i[k] = (const uint8_t*) ((uintptr_t) i[k] + input_offset);
      }
    }
    input = (const uint8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
    do {
      // Per-channel weight layout: one int32 bias followed by 9 uint8 taps.
      int32_t vacc = unaligned_load_s32(w);
      const uint8_t* wk = (const uint8_t*) ((uintptr_t) w + sizeof(int32_t));
      for (size_t k = 0; k < 9; k++) {
        const int32_t vi = (int32_t) (uint32_t) *i[k]++;
        const int32_t vk = (int32_t) (uint32_t) wk[k] - vkernel_zero_point;
        vacc += vi * vk;
      }
      w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 9 * sizeof(uint8_t));

      // fp32 requantization: scale, clamp, then fmagic round-to-int.
      float vfpacc = (float) vacc * vscale;
      vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
      vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
      vfpacc += vmagic_bias;
      const int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
      *output++ = (uint8_t) vout;
    } while (--c != 0);
    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 5,371
| 39.089552
| 124
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-9p1c-minmax-fp32-scalar-imagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QU8 depthwise convolution, 9 taps, 1 channel per inner iteration, unipass.
// Accumulates in int32 and requantizes in fp32 using the "imagic" variant:
// add the magic bias first, reinterpret as integer, then clamp and remove the
// bias entirely in integer arithmetic.
void xnn_qu8_dwconv_minmax_fp32_ukernel_9p1c__scalar_imagic(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);
  // Requantization constants precomputed by the operator setup code.
  const float vscale = params->fp32_scalar_imagic.scale;
  const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
  const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
  const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
  const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
  const int32_t vkernel_zero_point = params->fp32_scalar_imagic.kernel_zero_point;
  do {
    // Fetch the 9 input row pointers for this output pixel; rows that alias
    // the shared zero (padding) buffer are not rebased by input_offset.
    const uint8_t* i[9];
    for (size_t k = 0; k < 9; k++) {
      i[k] = input[k];
      assert(i[k] != NULL);
      if XNN_UNPREDICTABLE(i[k] != zero) {
        i[k] = (const uint8_t*) ((uintptr_t) i[k] + input_offset);
      }
    }
    input = (const uint8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
    do {
      // Per-channel weight layout: one int32 bias followed by 9 uint8 taps.
      int32_t vacc = unaligned_load_s32(w);
      const uint8_t* wk = (const uint8_t*) ((uintptr_t) w + sizeof(int32_t));
      for (size_t k = 0; k < 9; k++) {
        const int32_t vi = (int32_t) (uint32_t) *i[k]++;
        const int32_t vk = (int32_t) (uint32_t) wk[k] - vkernel_zero_point;
        vacc += vi * vk;
      }
      w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 9 * sizeof(uint8_t));

      // fp32 requantization: scale, imagic round, integer clamp, un-bias.
      float vfpacc = (float) vacc * vscale;
      vfpacc += vmagic_bias;
      int32_t vout = (int32_t) float_as_uint32(vfpacc);
      vout = math_max_s32(vout, vmagic_min);
      vout = math_min_s32(vout, vmagic_max);
      vout -= vmagic_bias_less_zero_point;
      *output++ = (uint8_t) vout;
    } while (--c != 0);
    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 5,257
| 37.948148
| 124
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-9p1c-minmax-fp32-scalar-lrintf.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QU8 depthwise convolution, 9 taps, 1 channel per inner iteration, unipass.
// Accumulates in int32 and requantizes in fp32, rounding with lrintf()
// (round-to-nearest-even in the default rounding mode).
void xnn_qu8_dwconv_minmax_fp32_ukernel_9p1c__scalar_lrintf(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);
  // Requantization constants precomputed by the operator setup code.
  const float vscale = params->fp32_scalar_lrintf.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
  const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
  const int32_t vkernel_zero_point = params->fp32_scalar_lrintf.kernel_zero_point;
  do {
    // Fetch the 9 input row pointers for this output pixel; rows that alias
    // the shared zero (padding) buffer are not rebased by input_offset.
    const uint8_t* i[9];
    for (size_t k = 0; k < 9; k++) {
      i[k] = input[k];
      assert(i[k] != NULL);
      if XNN_UNPREDICTABLE(i[k] != zero) {
        i[k] = (const uint8_t*) ((uintptr_t) i[k] + input_offset);
      }
    }
    input = (const uint8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
    do {
      // Per-channel weight layout: one int32 bias followed by 9 uint8 taps.
      int32_t vacc = unaligned_load_s32(w);
      const uint8_t* wk = (const uint8_t*) ((uintptr_t) w + sizeof(int32_t));
      for (size_t k = 0; k < 9; k++) {
        const int32_t vi = (int32_t) (uint32_t) *i[k]++;
        const int32_t vk = (int32_t) (uint32_t) wk[k] - vkernel_zero_point;
        vacc += vi * vk;
      }
      w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 9 * sizeof(uint8_t));

      // fp32 requantization: scale, clamp, lrintf round, re-add zero point.
      float vfpacc = (float) vacc * vscale;
      vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
      vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
      const int32_t vrndacc = (int32_t) lrintf(vfpacc);
      const int32_t vout = vrndacc + voutput_zero_point;
      *output++ = (uint8_t) vout;
    } while (--c != 0);
    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 5,275
| 38.373134
| 124
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-9p1c-minmax-fp32-wasm-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QU8 depthwise convolution, 9 taps, 1 channel per inner iteration, unipass.
// WebAssembly variant: identical to the scalar fmagic kernel except that
// clamping uses the __builtin_wasm_{min,max}_f32 intrinsics.
void xnn_qu8_dwconv_minmax_fp32_ukernel_9p1c__wasm_fmagic(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);
  // Requantization constants precomputed by the operator setup code.
  const float vscale = params->fp32_scalar_fmagic.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
  const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
  const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
  const int32_t vkernel_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
  do {
    // Fetch the 9 input row pointers for this output pixel; rows that alias
    // the shared zero (padding) buffer are not rebased by input_offset.
    const uint8_t* i[9];
    for (size_t k = 0; k < 9; k++) {
      i[k] = input[k];
      assert(i[k] != NULL);
      if XNN_UNPREDICTABLE(i[k] != zero) {
        i[k] = (const uint8_t*) ((uintptr_t) i[k] + input_offset);
      }
    }
    input = (const uint8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
    do {
      // Per-channel weight layout: one int32 bias followed by 9 uint8 taps.
      int32_t vacc = unaligned_load_s32(w);
      const uint8_t* wk = (const uint8_t*) ((uintptr_t) w + sizeof(int32_t));
      for (size_t k = 0; k < 9; k++) {
        const int32_t vi = (int32_t) (uint32_t) *i[k]++;
        const int32_t vk = (int32_t) (uint32_t) wk[k] - vkernel_zero_point;
        vacc += vi * vk;
      }
      w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 9 * sizeof(uint8_t));

      // fp32 requantization: scale, wasm clamp, fmagic round-to-int.
      float vfpacc = (float) vacc * vscale;
      vfpacc = __builtin_wasm_max_f32(vfpacc, voutput_min_less_zero_point);
      vfpacc = __builtin_wasm_min_f32(vfpacc, voutput_max_less_zero_point);
      vfpacc += vmagic_bias;
      const int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
      *output++ = (uint8_t) vout;
    } while (--c != 0);
    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 5,389
| 39.223881
| 124
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-9p1c-minmax-rndnu-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QU8 depthwise convolution, 9 taps, 1 channel per inner iteration, unipass.
// Accumulates in int32 and requantizes with the rndnu scheme: widening
// 32x32->64 multiply, pre-added rounding term, arithmetic right shift, then
// integer clamping and zero-point re-addition.
void xnn_qu8_dwconv_minmax_rndnu_ukernel_9p1c__scalar(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);
  // Fixed-point requantization constants precomputed by the setup code.
  const int32_t vmultiplier = params->rndnu_scalar.multiplier;
  const int64_t vrounding = params->rndnu_scalar.rounding;
  const uint32_t vshift = params->rndnu_scalar.shift;
  const int32_t voutput_min_less_zero_point = params->rndnu_scalar.output_min_less_zero_point;
  const int32_t voutput_max_less_zero_point = params->rndnu_scalar.output_max_less_zero_point;
  const int32_t voutput_zero_point = params->rndnu_scalar.output_zero_point;
  const int32_t vkernel_zero_point = params->rndnu_scalar.kernel_zero_point;
  do {
    // Fetch the 9 input row pointers for this output pixel; rows that alias
    // the shared zero (padding) buffer are not rebased by input_offset.
    const uint8_t* i[9];
    for (size_t k = 0; k < 9; k++) {
      i[k] = input[k];
      assert(i[k] != NULL);
      if XNN_UNPREDICTABLE(i[k] != zero) {
        i[k] = (const uint8_t*) ((uintptr_t) i[k] + input_offset);
      }
    }
    input = (const uint8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
    do {
      // Per-channel weight layout: one int32 bias followed by 9 uint8 taps.
      int32_t vacc = unaligned_load_s32(w);
      const uint8_t* wk = (const uint8_t*) ((uintptr_t) w + sizeof(int32_t));
      for (size_t k = 0; k < 9; k++) {
        const int32_t vi = (int32_t) (uint32_t) *i[k]++;
        const int32_t vk = (int32_t) (uint32_t) wk[k] - vkernel_zero_point;
        vacc += vi * vk;
      }
      w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 9 * sizeof(uint8_t));

      // rndnu requantization: widen, round, shift, clamp, add zero point.
      const int64_t vextacc = math_mulext_s32(vacc, vmultiplier) + vrounding;
      int32_t vout = (int32_t) math_asr_s64(vextacc, vshift);
      vout = math_max_s32(vout, voutput_min_less_zero_point);
      vout = math_min_s32(vout, voutput_max_less_zero_point);
      vout += voutput_zero_point;
      *output++ = (uint8_t) vout;
    } while (--c != 0);
    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 5,364
| 39.037313
| 124
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-9p2c-minmax-fp32-scalar-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
// QU8 depthwise convolution, 9 taps, 2 channels per main-loop iteration
// (plus a 1-channel remainder), unipass. Accumulates in int32 and
// requantizes in fp32 with the "fmagic" rounding trick.
void xnn_qu8_dwconv_minmax_fp32_ukernel_9p2c__scalar_fmagic(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);
  // Requantization constants precomputed by the operator setup code.
  const float vscale = params->fp32_scalar_fmagic.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
  const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
  const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
  const int32_t vkernel_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
  do {
    // Fetch the 9 input row pointers for this output pixel; rows that alias
    // the shared zero (padding) buffer are not rebased by input_offset.
    const uint8_t* i[9];
    for (size_t k = 0; k < 9; k++) {
      i[k] = input[k];
      assert(i[k] != NULL);
      if XNN_UNPREDICTABLE(i[k] != zero) {
        i[k] = (const uint8_t*) ((uintptr_t) i[k] + input_offset);
      }
    }
    input = (const uint8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
    // Main loop: two channels at a time. Weight group layout: two int32
    // biases followed by 9 pairs of uint8 taps (channel-interleaved).
    for (; c >= 2; c -= 2) {
      int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
      int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
      const uint8_t* wk = (const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t));
      for (size_t k = 0; k < 9; k++) {
        const int32_t vix0 = (int32_t) (uint32_t) i[k][0];
        const int32_t vix1 = (int32_t) (uint32_t) i[k][1];
        i[k] += 2;
        const int32_t vkx0 = (int32_t) (uint32_t) wk[2 * k] - vkernel_zero_point;
        const int32_t vkx1 = (int32_t) (uint32_t) wk[2 * k + 1] - vkernel_zero_point;
        vacc0 += vix0 * vkx0;
        vacc1 += vix1 * vkx1;
      }
      w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 18 * sizeof(uint8_t));

      // fp32 requantization: scale, clamp, fmagic round-to-int.
      float vfpacc0 = (float) vacc0 * vscale;
      float vfpacc1 = (float) vacc1 * vscale;
      vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
      vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
      vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
      vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
      vfpacc0 += vmagic_bias;
      vfpacc1 += vmagic_bias;
      const int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
      const int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
      output[0] = (uint8_t) vout0;
      output[1] = (uint8_t) vout1;
      output += 2;
    }
    // Remainder: at most one trailing channel; reads tap k at wk[2 * k]
    // (the first channel of each interleaved pair), pointers not advanced.
    if XNN_UNLIKELY(c != 0) {
      int32_t vacc = unaligned_load_s32(w);
      const uint8_t* wk = (const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t));
      for (size_t k = 0; k < 9; k++) {
        const int32_t vi = (int32_t) (uint32_t) *i[k];
        const int32_t vk = (int32_t) (uint32_t) wk[2 * k] - vkernel_zero_point;
        vacc += vi * vk;
      }
      float vfpacc = (float) vacc * vscale;
      vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
      vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
      vfpacc += vmagic_bias;
      const int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
      *output++ = (uint8_t) vout;
    }
    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 10,367
| 40.306773
| 131
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-9p2c-minmax-fp32-scalar-imagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_dwconv_minmax_fp32_ukernel_9p2c__scalar_imagic(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);

  // Requantization parameters for the fp32 "imagic" scheme: scale the int32
  // accumulator, add a magic bias so the float's mantissa bits carry the
  // rounded integer, clamp in the integer domain, then subtract the bias.
  const float vscale = params->fp32_scalar_imagic.scale;
  const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
  const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
  const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
  const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
  const int32_t vkernel_zero_point = params->fp32_scalar_imagic.kernel_zero_point;
  do {
    // Gather the 9 input row pointers for this output pixel. Rows equal to
    // `zero` are padding rows and must not be shifted by input_offset.
    const uint8_t* in[9];
    for (size_t k = 0; k < 9; k++) {
      in[k] = input[k];
      assert(in[k] != NULL);
      if XNN_UNPREDICTABLE(in[k] != zero) {
        in[k] = (const uint8_t*) ((uintptr_t) in[k] + input_offset);
      }
    }
    input = (const uint8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
    // Main loop: two channels per iteration. Weight group layout:
    // 2 x int32 bias followed by 18 x uint8 taps (9 taps, channel-interleaved).
    for (; c >= 2; c -= 2) {
      int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
      int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
      const uint8_t* wk = (const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t));
      for (size_t k = 0; k < 9; k++) {
        const int32_t vi0 = (int32_t) (uint32_t) in[k][0];
        const int32_t vi1 = (int32_t) (uint32_t) in[k][1];
        in[k] += 2;
        const int32_t vk0 = (int32_t) (uint32_t) wk[2 * k] - vkernel_zero_point;
        const int32_t vk1 = (int32_t) (uint32_t) wk[2 * k + 1] - vkernel_zero_point;
        vacc0 += vi0 * vk0;
        vacc1 += vi1 * vk1;
      }
      w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 18 * sizeof(uint8_t));

      float vfpacc0 = (float) vacc0 * vscale;
      float vfpacc1 = (float) vacc1 * vscale;
      vfpacc0 += vmagic_bias;
      vfpacc1 += vmagic_bias;
      // Reinterpret the biased float bits as an integer, clamp, un-bias.
      int32_t vout0 = (int32_t) float_as_uint32(vfpacc0);
      int32_t vout1 = (int32_t) float_as_uint32(vfpacc1);
      vout0 = math_max_s32(vout0, vmagic_min);
      vout1 = math_max_s32(vout1, vmagic_min);
      vout0 = math_min_s32(vout0, vmagic_max);
      vout1 = math_min_s32(vout1, vmagic_max);
      vout0 -= vmagic_bias_less_zero_point;
      vout1 -= vmagic_bias_less_zero_point;

      output[0] = (uint8_t) vout0;
      output[1] = (uint8_t) vout1;
      output += 2;
    }
    // Remainder: a single trailing channel (channel 0 of the last group).
    if XNN_UNLIKELY(c != 0) {
      int32_t vacc = unaligned_load_s32(w);
      const uint8_t* wk = (const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t));
      for (size_t k = 0; k < 9; k++) {
        const int32_t vi = (int32_t) (uint32_t) *in[k];
        const int32_t vk = (int32_t) (uint32_t) wk[2 * k] - vkernel_zero_point;
        vacc += vi * vk;
      }
      float vfpacc = (float) vacc * vscale;
      vfpacc += vmagic_bias;
      int32_t vout = (int32_t) float_as_uint32(vfpacc);
      vout = math_max_s32(vout, vmagic_min);
      vout = math_min_s32(vout, vmagic_max);
      vout -= vmagic_bias_less_zero_point;
      *output++ = (uint8_t) vout;
    }

    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 10,184
| 38.941176
| 131
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-9p2c-minmax-fp32-scalar-lrintf.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_dwconv_minmax_fp32_ukernel_9p2c__scalar_lrintf(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);

  // Requantization parameters for the fp32 "lrintf" scheme: scale, clamp in
  // the float domain, round with lrintf, then add the output zero point.
  const float vscale = params->fp32_scalar_lrintf.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
  const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
  const int32_t vkernel_zero_point = params->fp32_scalar_lrintf.kernel_zero_point;
  do {
    // Gather the 9 input row pointers for this output pixel. Rows equal to
    // `zero` are padding rows and must not be shifted by input_offset.
    const uint8_t* in[9];
    for (size_t k = 0; k < 9; k++) {
      in[k] = input[k];
      assert(in[k] != NULL);
      if XNN_UNPREDICTABLE(in[k] != zero) {
        in[k] = (const uint8_t*) ((uintptr_t) in[k] + input_offset);
      }
    }
    input = (const uint8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
    // Main loop: two channels per iteration. Weight group layout:
    // 2 x int32 bias followed by 18 x uint8 taps (9 taps, channel-interleaved).
    for (; c >= 2; c -= 2) {
      int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
      int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
      const uint8_t* wk = (const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t));
      for (size_t k = 0; k < 9; k++) {
        const int32_t vi0 = (int32_t) (uint32_t) in[k][0];
        const int32_t vi1 = (int32_t) (uint32_t) in[k][1];
        in[k] += 2;
        const int32_t vk0 = (int32_t) (uint32_t) wk[2 * k] - vkernel_zero_point;
        const int32_t vk1 = (int32_t) (uint32_t) wk[2 * k + 1] - vkernel_zero_point;
        vacc0 += vi0 * vk0;
        vacc1 += vi1 * vk1;
      }
      w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 18 * sizeof(uint8_t));

      float vfpacc0 = (float) vacc0 * vscale;
      float vfpacc1 = (float) vacc1 * vscale;
      // Clamp before rounding so lrintf cannot overflow the int32 range.
      vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
      vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
      vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
      vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
      const int32_t vrndacc0 = (int32_t) lrintf(vfpacc0);
      const int32_t vrndacc1 = (int32_t) lrintf(vfpacc1);
      const int32_t vout0 = vrndacc0 + voutput_zero_point;
      const int32_t vout1 = vrndacc1 + voutput_zero_point;

      output[0] = (uint8_t) vout0;
      output[1] = (uint8_t) vout1;
      output += 2;
    }
    // Remainder: a single trailing channel (channel 0 of the last group).
    if XNN_UNLIKELY(c != 0) {
      int32_t vacc = unaligned_load_s32(w);
      const uint8_t* wk = (const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t));
      for (size_t k = 0; k < 9; k++) {
        const int32_t vi = (int32_t) (uint32_t) *in[k];
        const int32_t vk = (int32_t) (uint32_t) wk[2 * k] - vkernel_zero_point;
        vacc += vi * vk;
      }
      float vfpacc = (float) vacc * vscale;
      vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
      vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
      const int32_t vrndacc = (int32_t) lrintf(vfpacc);
      const int32_t vout = vrndacc + voutput_zero_point;
      *output++ = (uint8_t) vout;
    }

    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 10,263
| 39.89243
| 131
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-9p2c-minmax-fp32-wasm-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_dwconv_minmax_fp32_ukernel_9p2c__wasm_fmagic(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);

  // Requantization parameters for the fp32 "fmagic" scheme: scale, clamp in
  // the float domain (via WAsm min/max builtins), add a magic bias so the
  // float's integer bits hold the rounded result, then subtract the bias
  // combined with the output zero point.
  const float vscale = params->fp32_scalar_fmagic.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
  const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
  const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
  const int32_t vkernel_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
  do {
    // Gather the 9 input row pointers for this output pixel. Rows equal to
    // `zero` are padding rows and must not be shifted by input_offset.
    const uint8_t* in[9];
    for (size_t k = 0; k < 9; k++) {
      in[k] = input[k];
      assert(in[k] != NULL);
      if XNN_UNPREDICTABLE(in[k] != zero) {
        in[k] = (const uint8_t*) ((uintptr_t) in[k] + input_offset);
      }
    }
    input = (const uint8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
    // Main loop: two channels per iteration. Weight group layout:
    // 2 x int32 bias followed by 18 x uint8 taps (9 taps, channel-interleaved).
    for (; c >= 2; c -= 2) {
      int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
      int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
      const uint8_t* wk = (const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t));
      for (size_t k = 0; k < 9; k++) {
        const int32_t vi0 = (int32_t) (uint32_t) in[k][0];
        const int32_t vi1 = (int32_t) (uint32_t) in[k][1];
        in[k] += 2;
        const int32_t vk0 = (int32_t) (uint32_t) wk[2 * k] - vkernel_zero_point;
        const int32_t vk1 = (int32_t) (uint32_t) wk[2 * k + 1] - vkernel_zero_point;
        vacc0 += vi0 * vk0;
        vacc1 += vi1 * vk1;
      }
      w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 18 * sizeof(uint8_t));

      float vfpacc0 = (float) vacc0 * vscale;
      float vfpacc1 = (float) vacc1 * vscale;
      vfpacc0 = __builtin_wasm_max_f32(vfpacc0, voutput_min_less_zero_point);
      vfpacc1 = __builtin_wasm_max_f32(vfpacc1, voutput_min_less_zero_point);
      vfpacc0 = __builtin_wasm_min_f32(vfpacc0, voutput_max_less_zero_point);
      vfpacc1 = __builtin_wasm_min_f32(vfpacc1, voutput_max_less_zero_point);
      vfpacc0 += vmagic_bias;
      vfpacc1 += vmagic_bias;
      const int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
      const int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;

      output[0] = (uint8_t) vout0;
      output[1] = (uint8_t) vout1;
      output += 2;
    }
    // Remainder: a single trailing channel (channel 0 of the last group).
    if XNN_UNLIKELY(c != 0) {
      int32_t vacc = unaligned_load_s32(w);
      const uint8_t* wk = (const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t));
      for (size_t k = 0; k < 9; k++) {
        const int32_t vi = (int32_t) (uint32_t) *in[k];
        const int32_t vk = (int32_t) (uint32_t) wk[2 * k] - vkernel_zero_point;
        vacc += vi * vk;
      }
      float vfpacc = (float) vacc * vscale;
      vfpacc = __builtin_wasm_max_f32(vfpacc, voutput_min_less_zero_point);
      vfpacc = __builtin_wasm_min_f32(vfpacc, voutput_max_less_zero_point);
      vfpacc += vmagic_bias;
      const int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
      *output++ = (uint8_t) vout;
    }

    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 10,425
| 40.537849
| 131
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-9p2c-minmax-rndnu-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_dwconv_minmax_rndnu_ukernel_9p2c__scalar(
size_t channels,
size_t output_width,
const uint8_t** input,
const void* weights,
uint8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const int32_t vmultiplier = params->rndnu_scalar.multiplier;
const int64_t vrounding = params->rndnu_scalar.rounding;
const uint32_t vshift = params->rndnu_scalar.shift;
const int32_t voutput_min_less_zero_point = params->rndnu_scalar.output_min_less_zero_point;
const int32_t voutput_max_less_zero_point = params->rndnu_scalar.output_max_less_zero_point;
const int32_t voutput_zero_point = params->rndnu_scalar.output_zero_point;
const int32_t vkernel_zero_point = params->rndnu_scalar.kernel_zero_point;
do {
const uint8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
const uint8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
}
const uint8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
}
const uint8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
}
const uint8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const uint8_t*) ((uintptr_t) i8 + input_offset);
}
input = (const uint8_t**) ((uintptr_t) input + input_stride);
size_t c = channels;
const void* w = weights;
for (; c >= 2; c -= 2) {
int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc1 = unaligned_indexed_load_s32(w, 1);
const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
i0 += 2;
const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0] - vkernel_zero_point;
const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1] - vkernel_zero_point;
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
i1 += 2;
const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2] - vkernel_zero_point;
const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3] - vkernel_zero_point;
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
i2 += 2;
const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4] - vkernel_zero_point;
const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5] - vkernel_zero_point;
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
i3 += 2;
const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6] - vkernel_zero_point;
const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7] - vkernel_zero_point;
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
i4 += 2;
const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8] - vkernel_zero_point;
const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9] - vkernel_zero_point;
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
const int32_t vi5x0 = (int32_t) (uint32_t) i5[0];
const int32_t vi5x1 = (int32_t) (uint32_t) i5[1];
i5 += 2;
const int32_t vk5x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10] - vkernel_zero_point;
const int32_t vk5x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[11] - vkernel_zero_point;
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
const int32_t vi6x0 = (int32_t) (uint32_t) i6[0];
const int32_t vi6x1 = (int32_t) (uint32_t) i6[1];
i6 += 2;
const int32_t vk6x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12] - vkernel_zero_point;
const int32_t vk6x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[13] - vkernel_zero_point;
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
const int32_t vi7x0 = (int32_t) (uint32_t) i7[0];
const int32_t vi7x1 = (int32_t) (uint32_t) i7[1];
i7 += 2;
const int32_t vk7x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14] - vkernel_zero_point;
const int32_t vk7x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[15] - vkernel_zero_point;
vacc0 += vi7x0 * vk7x0;
vacc1 += vi7x1 * vk7x1;
const int32_t vi8x0 = (int32_t) (uint32_t) i8[0];
const int32_t vi8x1 = (int32_t) (uint32_t) i8[1];
i8 += 2;
const int32_t vk8x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[16] - vkernel_zero_point;
const int32_t vk8x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[17] - vkernel_zero_point;
vacc0 += vi8x0 * vk8x0;
vacc1 += vi8x1 * vk8x1;
w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 18 * sizeof(uint8_t));
const int64_t vextacc0 = math_mulext_s32(vacc0, vmultiplier) + vrounding;
const int64_t vextacc1 = math_mulext_s32(vacc1, vmultiplier) + vrounding;
int32_t vout0 = (int32_t) math_asr_s64(vextacc0, vshift);
int32_t vout1 = (int32_t) math_asr_s64(vextacc1, vshift);
vout0 = math_max_s32(vout0, voutput_min_less_zero_point);
vout1 = math_max_s32(vout1, voutput_min_less_zero_point);
vout0 = math_min_s32(vout0, voutput_max_less_zero_point);
vout1 = math_min_s32(vout1, voutput_max_less_zero_point);
vout0 += voutput_zero_point;
vout1 += voutput_zero_point;
output[0] = (uint8_t) vout0;
output[1] = (uint8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(c != 0) {
int32_t vacc = unaligned_load_s32(w);
const int32_t vi0 = (int32_t) (uint32_t) *i0;
const int32_t vk0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0] - vkernel_zero_point;
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) (uint32_t) *i1;
const int32_t vk1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2] - vkernel_zero_point;
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) (uint32_t) *i2;
const int32_t vk2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4] - vkernel_zero_point;
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) (uint32_t) *i3;
const int32_t vk3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6] - vkernel_zero_point;
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) (uint32_t) *i4;
const int32_t vk4 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8] - vkernel_zero_point;
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) (uint32_t) *i5;
const int32_t vk5 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10] - vkernel_zero_point;
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) (uint32_t) *i6;
const int32_t vk6 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12] - vkernel_zero_point;
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) (uint32_t) *i7;
const int32_t vk7 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14] - vkernel_zero_point;
vacc += vi7 * vk7;
const int32_t vi8 = (int32_t) (uint32_t) *i8;
const int32_t vk8 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[16] - vkernel_zero_point;
vacc += vi8 * vk8;
const int64_t vextacc = math_mulext_s32(vacc, vmultiplier) + vrounding;
int32_t vout = (int32_t) math_asr_s64(vextacc, vshift);
vout = math_max_s32(vout, voutput_min_less_zero_point);
vout = math_min_s32(vout, voutput_max_less_zero_point);
vout += voutput_zero_point;
*output++ = (uint8_t) vout;
}
output = (uint8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 10,327
| 40.645161
| 131
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-9p4c-minmax-fp32-scalar-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
// Depthwise convolution microkernel: QU8, unipass, 9 kernel taps (9p),
// channel tile of 4 (4c), scalar implementation with fp32 "fmagic"
// requantization: the int32 accumulator is converted to float, scaled,
// clamped, then rounded to an integer by adding a magic bias and reading
// the result out of the float's bit pattern.
//
// Weights layout per group of 4 channels (as consumed below):
//   4 x int32 bias, followed by 36 x uint8 kernel values
//   (9 taps x 4 channels, tap-major: tap t, channel x is at index 4*t + x).
void xnn_qu8_dwconv_minmax_fp32_ukernel_9p4c__scalar_fmagic(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);

  // Requantization constants, precomputed by operator setup.
  const float vscale = params->fp32_scalar_fmagic.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
  const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
  const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
  const int32_t vkernel_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
  do {
    // Load the 9 input row pointers for this output pixel. Rows equal to
    // 'zero' are shared padding rows and must not be shifted by input_offset.
    const uint8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
    }
    const uint8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
    }
    const uint8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
    }
    const uint8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
    }
    const uint8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
    }
    const uint8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
    }
    const uint8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
    }
    const uint8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
    }
    const uint8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const uint8_t*) ((uintptr_t) i8 + input_offset);
    }
    // Advance to the next output pixel's row-pointer set.
    input = (const uint8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
    // Main loop: process 4 channels per iteration.
    for (; c >= 4; c -= 4) {
      // Initialize each accumulator with its per-channel bias.
      int32_t vacc0 = ((const int32_t*) w)[0];
      int32_t vacc1 = ((const int32_t*) w)[1];
      int32_t vacc2 = ((const int32_t*) w)[2];
      int32_t vacc3 = ((const int32_t*) w)[3];

      // Tap 0. Kernel bytes start right after the 4 int32 biases; kernel
      // values are unsigned, so subtract the kernel zero point to obtain the
      // signed effective weight. Taps 1-8 below follow the same pattern with
      // kernel indices 4*t .. 4*t+3.
      const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
      const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
      const int32_t vi0x2 = (int32_t) (uint32_t) i0[2];
      const int32_t vi0x3 = (int32_t) (uint32_t) i0[3];
      i0 += 4;

      const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[0] - vkernel_zero_point;
      const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[1] - vkernel_zero_point;
      const int32_t vk0x2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[2] - vkernel_zero_point;
      const int32_t vk0x3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[3] - vkernel_zero_point;

      vacc0 += vi0x0 * vk0x0;
      vacc1 += vi0x1 * vk0x1;
      vacc2 += vi0x2 * vk0x2;
      vacc3 += vi0x3 * vk0x3;

      // Tap 1.
      const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
      const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
      const int32_t vi1x2 = (int32_t) (uint32_t) i1[2];
      const int32_t vi1x3 = (int32_t) (uint32_t) i1[3];
      i1 += 4;

      const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[4] - vkernel_zero_point;
      const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[5] - vkernel_zero_point;
      const int32_t vk1x2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[6] - vkernel_zero_point;
      const int32_t vk1x3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[7] - vkernel_zero_point;

      vacc0 += vi1x0 * vk1x0;
      vacc1 += vi1x1 * vk1x1;
      vacc2 += vi1x2 * vk1x2;
      vacc3 += vi1x3 * vk1x3;

      // Tap 2.
      const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
      const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
      const int32_t vi2x2 = (int32_t) (uint32_t) i2[2];
      const int32_t vi2x3 = (int32_t) (uint32_t) i2[3];
      i2 += 4;

      const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[8] - vkernel_zero_point;
      const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[9] - vkernel_zero_point;
      const int32_t vk2x2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[10] - vkernel_zero_point;
      const int32_t vk2x3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[11] - vkernel_zero_point;

      vacc0 += vi2x0 * vk2x0;
      vacc1 += vi2x1 * vk2x1;
      vacc2 += vi2x2 * vk2x2;
      vacc3 += vi2x3 * vk2x3;

      // Tap 3.
      const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
      const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
      const int32_t vi3x2 = (int32_t) (uint32_t) i3[2];
      const int32_t vi3x3 = (int32_t) (uint32_t) i3[3];
      i3 += 4;

      const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[12] - vkernel_zero_point;
      const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[13] - vkernel_zero_point;
      const int32_t vk3x2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[14] - vkernel_zero_point;
      const int32_t vk3x3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[15] - vkernel_zero_point;

      vacc0 += vi3x0 * vk3x0;
      vacc1 += vi3x1 * vk3x1;
      vacc2 += vi3x2 * vk3x2;
      vacc3 += vi3x3 * vk3x3;

      // Tap 4.
      const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
      const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
      const int32_t vi4x2 = (int32_t) (uint32_t) i4[2];
      const int32_t vi4x3 = (int32_t) (uint32_t) i4[3];
      i4 += 4;

      const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[16] - vkernel_zero_point;
      const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[17] - vkernel_zero_point;
      const int32_t vk4x2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[18] - vkernel_zero_point;
      const int32_t vk4x3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[19] - vkernel_zero_point;

      vacc0 += vi4x0 * vk4x0;
      vacc1 += vi4x1 * vk4x1;
      vacc2 += vi4x2 * vk4x2;
      vacc3 += vi4x3 * vk4x3;

      // Tap 5.
      const int32_t vi5x0 = (int32_t) (uint32_t) i5[0];
      const int32_t vi5x1 = (int32_t) (uint32_t) i5[1];
      const int32_t vi5x2 = (int32_t) (uint32_t) i5[2];
      const int32_t vi5x3 = (int32_t) (uint32_t) i5[3];
      i5 += 4;

      const int32_t vk5x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[20] - vkernel_zero_point;
      const int32_t vk5x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[21] - vkernel_zero_point;
      const int32_t vk5x2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[22] - vkernel_zero_point;
      const int32_t vk5x3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[23] - vkernel_zero_point;

      vacc0 += vi5x0 * vk5x0;
      vacc1 += vi5x1 * vk5x1;
      vacc2 += vi5x2 * vk5x2;
      vacc3 += vi5x3 * vk5x3;

      // Tap 6.
      const int32_t vi6x0 = (int32_t) (uint32_t) i6[0];
      const int32_t vi6x1 = (int32_t) (uint32_t) i6[1];
      const int32_t vi6x2 = (int32_t) (uint32_t) i6[2];
      const int32_t vi6x3 = (int32_t) (uint32_t) i6[3];
      i6 += 4;

      const int32_t vk6x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[24] - vkernel_zero_point;
      const int32_t vk6x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[25] - vkernel_zero_point;
      const int32_t vk6x2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[26] - vkernel_zero_point;
      const int32_t vk6x3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[27] - vkernel_zero_point;

      vacc0 += vi6x0 * vk6x0;
      vacc1 += vi6x1 * vk6x1;
      vacc2 += vi6x2 * vk6x2;
      vacc3 += vi6x3 * vk6x3;

      // Tap 7.
      const int32_t vi7x0 = (int32_t) (uint32_t) i7[0];
      const int32_t vi7x1 = (int32_t) (uint32_t) i7[1];
      const int32_t vi7x2 = (int32_t) (uint32_t) i7[2];
      const int32_t vi7x3 = (int32_t) (uint32_t) i7[3];
      i7 += 4;

      const int32_t vk7x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[28] - vkernel_zero_point;
      const int32_t vk7x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[29] - vkernel_zero_point;
      const int32_t vk7x2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[30] - vkernel_zero_point;
      const int32_t vk7x3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[31] - vkernel_zero_point;

      vacc0 += vi7x0 * vk7x0;
      vacc1 += vi7x1 * vk7x1;
      vacc2 += vi7x2 * vk7x2;
      vacc3 += vi7x3 * vk7x3;

      // Tap 8.
      const int32_t vi8x0 = (int32_t) (uint32_t) i8[0];
      const int32_t vi8x1 = (int32_t) (uint32_t) i8[1];
      const int32_t vi8x2 = (int32_t) (uint32_t) i8[2];
      const int32_t vi8x3 = (int32_t) (uint32_t) i8[3];
      i8 += 4;

      const int32_t vk8x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[32] - vkernel_zero_point;
      const int32_t vk8x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[33] - vkernel_zero_point;
      const int32_t vk8x2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[34] - vkernel_zero_point;
      const int32_t vk8x3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[35] - vkernel_zero_point;

      vacc0 += vi8x0 * vk8x0;
      vacc1 += vi8x1 * vk8x1;
      vacc2 += vi8x2 * vk8x2;
      vacc3 += vi8x3 * vk8x3;

      // Advance past this group's weights: 4 biases + 9*4 kernel bytes.
      w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t) + 36 * sizeof(uint8_t));

      // fp32 requantization: scale the accumulator.
      float vfpacc0 = (float) vacc0;
      float vfpacc1 = (float) vacc1;
      float vfpacc2 = (float) vacc2;
      float vfpacc3 = (float) vacc3;

      vfpacc0 *= vscale;
      vfpacc1 *= vscale;
      vfpacc2 *= vscale;
      vfpacc3 *= vscale;

      // Clamp in the zero-point-free domain (output zero point is folded
      // into the min/max constants).
      vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
      vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
      vfpacc2 = math_max_f32(vfpacc2, voutput_min_less_zero_point);
      vfpacc3 = math_max_f32(vfpacc3, voutput_min_less_zero_point);

      vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
      vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
      vfpacc2 = math_min_f32(vfpacc2, voutput_max_less_zero_point);
      vfpacc3 = math_min_f32(vfpacc3, voutput_max_less_zero_point);

      // Magic-bias rounding: adding the bias fixes the float exponent so
      // the integer result lands in the mantissa bits; subtracting
      // (magic bias - output zero point) as an int recovers the quantized
      // value with the output zero point applied.
      vfpacc0 += vmagic_bias;
      vfpacc1 += vmagic_bias;
      vfpacc2 += vmagic_bias;
      vfpacc3 += vmagic_bias;

      int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
      int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
      int32_t vout2 = (int32_t) float_as_uint32(vfpacc2) - vmagic_bias_less_output_zero_point;
      int32_t vout3 = (int32_t) float_as_uint32(vfpacc3) - vmagic_bias_less_output_zero_point;

      output[0] = (uint8_t) vout0;
      output[1] = (uint8_t) vout1;
      output[2] = (uint8_t) vout2;
      output[3] = (uint8_t) vout3;
      output += 4;
    }
    if XNN_UNLIKELY(c != 0) {
      // Remainder loop: one channel at a time (c is 1..3 here). 'k' walks
      // the kernel bytes with a stride of 4 (the channel tile) per tap,
      // while 'w' steps through the remaining int32 biases.
      const uint8_t* k = (const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t));
      do {
        int32_t vacc = *((const int32_t*) w);
        w = (const void*) ((uintptr_t) w + sizeof(int32_t));

        const int32_t vi0 = (int32_t) (uint32_t) *i0++;
        const int32_t vk0 = (int32_t) (uint32_t) k[0] - vkernel_zero_point;
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) (uint32_t) *i1++;
        const int32_t vk1 = (int32_t) (uint32_t) k[4] - vkernel_zero_point;
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) (uint32_t) *i2++;
        const int32_t vk2 = (int32_t) (uint32_t) k[8] - vkernel_zero_point;
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) (uint32_t) *i3++;
        const int32_t vk3 = (int32_t) (uint32_t) k[12] - vkernel_zero_point;
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) (uint32_t) *i4++;
        const int32_t vk4 = (int32_t) (uint32_t) k[16] - vkernel_zero_point;
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) (uint32_t) *i5++;
        const int32_t vk5 = (int32_t) (uint32_t) k[20] - vkernel_zero_point;
        vacc += vi5 * vk5;
        const int32_t vi6 = (int32_t) (uint32_t) *i6++;
        const int32_t vk6 = (int32_t) (uint32_t) k[24] - vkernel_zero_point;
        vacc += vi6 * vk6;
        const int32_t vi7 = (int32_t) (uint32_t) *i7++;
        const int32_t vk7 = (int32_t) (uint32_t) k[28] - vkernel_zero_point;
        vacc += vi7 * vk7;
        const int32_t vi8 = (int32_t) (uint32_t) *i8++;
        const int32_t vk8 = (int32_t) (uint32_t) k[32] - vkernel_zero_point;
        vacc += vi8 * vk8;
        k += 1;

        // Same fmagic requantization as the main loop, scalar form.
        float vfpacc = (float) vacc * vscale;
        vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
        vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
        vfpacc += vmagic_bias;
        int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;

        *output++ = (uint8_t) vout;
      } while (--c != 0);
    }

    // Skip any inter-row output padding.
    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 14,840
| 44.664615
| 131
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-9p4c-minmax-fp32-scalar-imagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
// Depthwise convolution microkernel: QU8, unipass, 9 kernel taps (9p),
// channel tile of 4 (4c), scalar implementation with fp32 "imagic"
// requantization. Unlike the fmagic variant, the magic bias is added
// BEFORE clamping: the float is reinterpreted as an int first, and the
// output min/max clamp is then applied in the integer domain against
// the magic-biased bounds (vmagic_min / vmagic_max).
//
// Weights layout per group of 4 channels (as consumed below):
//   4 x int32 bias, followed by 36 x uint8 kernel values
//   (9 taps x 4 channels, tap-major: tap t, channel x is at index 4*t + x).
void xnn_qu8_dwconv_minmax_fp32_ukernel_9p4c__scalar_imagic(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);

  // Requantization constants, precomputed by operator setup.
  const float vscale = params->fp32_scalar_imagic.scale;
  const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
  const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
  const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
  const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
  const int32_t vkernel_zero_point = params->fp32_scalar_imagic.kernel_zero_point;
  do {
    // Load the 9 input row pointers for this output pixel. Rows equal to
    // 'zero' are shared padding rows and must not be shifted by input_offset.
    const uint8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
    }
    const uint8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
    }
    const uint8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
    }
    const uint8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
    }
    const uint8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
    }
    const uint8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
    }
    const uint8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
    }
    const uint8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
    }
    const uint8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const uint8_t*) ((uintptr_t) i8 + input_offset);
    }
    // Advance to the next output pixel's row-pointer set.
    input = (const uint8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
    // Main loop: process 4 channels per iteration.
    for (; c >= 4; c -= 4) {
      // Initialize each accumulator with its per-channel bias.
      int32_t vacc0 = ((const int32_t*) w)[0];
      int32_t vacc1 = ((const int32_t*) w)[1];
      int32_t vacc2 = ((const int32_t*) w)[2];
      int32_t vacc3 = ((const int32_t*) w)[3];

      // Tap 0. Kernel bytes start right after the 4 int32 biases; kernel
      // values are unsigned, so subtract the kernel zero point to obtain the
      // signed effective weight. Taps 1-8 below follow the same pattern with
      // kernel indices 4*t .. 4*t+3.
      const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
      const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
      const int32_t vi0x2 = (int32_t) (uint32_t) i0[2];
      const int32_t vi0x3 = (int32_t) (uint32_t) i0[3];
      i0 += 4;

      const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[0] - vkernel_zero_point;
      const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[1] - vkernel_zero_point;
      const int32_t vk0x2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[2] - vkernel_zero_point;
      const int32_t vk0x3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[3] - vkernel_zero_point;

      vacc0 += vi0x0 * vk0x0;
      vacc1 += vi0x1 * vk0x1;
      vacc2 += vi0x2 * vk0x2;
      vacc3 += vi0x3 * vk0x3;

      // Tap 1.
      const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
      const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
      const int32_t vi1x2 = (int32_t) (uint32_t) i1[2];
      const int32_t vi1x3 = (int32_t) (uint32_t) i1[3];
      i1 += 4;

      const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[4] - vkernel_zero_point;
      const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[5] - vkernel_zero_point;
      const int32_t vk1x2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[6] - vkernel_zero_point;
      const int32_t vk1x3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[7] - vkernel_zero_point;

      vacc0 += vi1x0 * vk1x0;
      vacc1 += vi1x1 * vk1x1;
      vacc2 += vi1x2 * vk1x2;
      vacc3 += vi1x3 * vk1x3;

      // Tap 2.
      const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
      const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
      const int32_t vi2x2 = (int32_t) (uint32_t) i2[2];
      const int32_t vi2x3 = (int32_t) (uint32_t) i2[3];
      i2 += 4;

      const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[8] - vkernel_zero_point;
      const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[9] - vkernel_zero_point;
      const int32_t vk2x2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[10] - vkernel_zero_point;
      const int32_t vk2x3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[11] - vkernel_zero_point;

      vacc0 += vi2x0 * vk2x0;
      vacc1 += vi2x1 * vk2x1;
      vacc2 += vi2x2 * vk2x2;
      vacc3 += vi2x3 * vk2x3;

      // Tap 3.
      const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
      const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
      const int32_t vi3x2 = (int32_t) (uint32_t) i3[2];
      const int32_t vi3x3 = (int32_t) (uint32_t) i3[3];
      i3 += 4;

      const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[12] - vkernel_zero_point;
      const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[13] - vkernel_zero_point;
      const int32_t vk3x2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[14] - vkernel_zero_point;
      const int32_t vk3x3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[15] - vkernel_zero_point;

      vacc0 += vi3x0 * vk3x0;
      vacc1 += vi3x1 * vk3x1;
      vacc2 += vi3x2 * vk3x2;
      vacc3 += vi3x3 * vk3x3;

      // Tap 4.
      const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
      const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
      const int32_t vi4x2 = (int32_t) (uint32_t) i4[2];
      const int32_t vi4x3 = (int32_t) (uint32_t) i4[3];
      i4 += 4;

      const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[16] - vkernel_zero_point;
      const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[17] - vkernel_zero_point;
      const int32_t vk4x2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[18] - vkernel_zero_point;
      const int32_t vk4x3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[19] - vkernel_zero_point;

      vacc0 += vi4x0 * vk4x0;
      vacc1 += vi4x1 * vk4x1;
      vacc2 += vi4x2 * vk4x2;
      vacc3 += vi4x3 * vk4x3;

      // Tap 5.
      const int32_t vi5x0 = (int32_t) (uint32_t) i5[0];
      const int32_t vi5x1 = (int32_t) (uint32_t) i5[1];
      const int32_t vi5x2 = (int32_t) (uint32_t) i5[2];
      const int32_t vi5x3 = (int32_t) (uint32_t) i5[3];
      i5 += 4;

      const int32_t vk5x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[20] - vkernel_zero_point;
      const int32_t vk5x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[21] - vkernel_zero_point;
      const int32_t vk5x2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[22] - vkernel_zero_point;
      const int32_t vk5x3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[23] - vkernel_zero_point;

      vacc0 += vi5x0 * vk5x0;
      vacc1 += vi5x1 * vk5x1;
      vacc2 += vi5x2 * vk5x2;
      vacc3 += vi5x3 * vk5x3;

      // Tap 6.
      const int32_t vi6x0 = (int32_t) (uint32_t) i6[0];
      const int32_t vi6x1 = (int32_t) (uint32_t) i6[1];
      const int32_t vi6x2 = (int32_t) (uint32_t) i6[2];
      const int32_t vi6x3 = (int32_t) (uint32_t) i6[3];
      i6 += 4;

      const int32_t vk6x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[24] - vkernel_zero_point;
      const int32_t vk6x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[25] - vkernel_zero_point;
      const int32_t vk6x2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[26] - vkernel_zero_point;
      const int32_t vk6x3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[27] - vkernel_zero_point;

      vacc0 += vi6x0 * vk6x0;
      vacc1 += vi6x1 * vk6x1;
      vacc2 += vi6x2 * vk6x2;
      vacc3 += vi6x3 * vk6x3;

      // Tap 7.
      const int32_t vi7x0 = (int32_t) (uint32_t) i7[0];
      const int32_t vi7x1 = (int32_t) (uint32_t) i7[1];
      const int32_t vi7x2 = (int32_t) (uint32_t) i7[2];
      const int32_t vi7x3 = (int32_t) (uint32_t) i7[3];
      i7 += 4;

      const int32_t vk7x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[28] - vkernel_zero_point;
      const int32_t vk7x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[29] - vkernel_zero_point;
      const int32_t vk7x2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[30] - vkernel_zero_point;
      const int32_t vk7x3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[31] - vkernel_zero_point;

      vacc0 += vi7x0 * vk7x0;
      vacc1 += vi7x1 * vk7x1;
      vacc2 += vi7x2 * vk7x2;
      vacc3 += vi7x3 * vk7x3;

      // Tap 8.
      const int32_t vi8x0 = (int32_t) (uint32_t) i8[0];
      const int32_t vi8x1 = (int32_t) (uint32_t) i8[1];
      const int32_t vi8x2 = (int32_t) (uint32_t) i8[2];
      const int32_t vi8x3 = (int32_t) (uint32_t) i8[3];
      i8 += 4;

      const int32_t vk8x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[32] - vkernel_zero_point;
      const int32_t vk8x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[33] - vkernel_zero_point;
      const int32_t vk8x2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[34] - vkernel_zero_point;
      const int32_t vk8x3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[35] - vkernel_zero_point;

      vacc0 += vi8x0 * vk8x0;
      vacc1 += vi8x1 * vk8x1;
      vacc2 += vi8x2 * vk8x2;
      vacc3 += vi8x3 * vk8x3;

      // Advance past this group's weights: 4 biases + 9*4 kernel bytes.
      w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t) + 36 * sizeof(uint8_t));

      // fp32 requantization: scale the accumulator.
      float vfpacc0 = (float) vacc0;
      float vfpacc1 = (float) vacc1;
      float vfpacc2 = (float) vacc2;
      float vfpacc3 = (float) vacc3;

      vfpacc0 *= vscale;
      vfpacc1 *= vscale;
      vfpacc2 *= vscale;
      vfpacc3 *= vscale;

      // Add the magic bias first, then reinterpret the float bits as an
      // integer; clamping is done afterwards in the integer domain.
      vfpacc0 += vmagic_bias;
      vfpacc1 += vmagic_bias;
      vfpacc2 += vmagic_bias;
      vfpacc3 += vmagic_bias;

      int32_t vout0 = (int32_t) float_as_uint32(vfpacc0);
      int32_t vout1 = (int32_t) float_as_uint32(vfpacc1);
      int32_t vout2 = (int32_t) float_as_uint32(vfpacc2);
      int32_t vout3 = (int32_t) float_as_uint32(vfpacc3);

      // Integer clamp against the magic-biased output bounds.
      vout0 = math_max_s32(vout0, vmagic_min);
      vout1 = math_max_s32(vout1, vmagic_min);
      vout2 = math_max_s32(vout2, vmagic_min);
      vout3 = math_max_s32(vout3, vmagic_min);

      vout0 = math_min_s32(vout0, vmagic_max);
      vout1 = math_min_s32(vout1, vmagic_max);
      vout2 = math_min_s32(vout2, vmagic_max);
      vout3 = math_min_s32(vout3, vmagic_max);

      // Remove the magic bias and apply the output zero point in one step.
      vout0 -= vmagic_bias_less_zero_point;
      vout1 -= vmagic_bias_less_zero_point;
      vout2 -= vmagic_bias_less_zero_point;
      vout3 -= vmagic_bias_less_zero_point;

      output[0] = (uint8_t) vout0;
      output[1] = (uint8_t) vout1;
      output[2] = (uint8_t) vout2;
      output[3] = (uint8_t) vout3;
      output += 4;
    }
    if XNN_UNLIKELY(c != 0) {
      // Remainder loop: one channel at a time (c is 1..3 here). 'k' walks
      // the kernel bytes with a stride of 4 (the channel tile) per tap,
      // while 'w' steps through the remaining int32 biases.
      const uint8_t* k = (const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t));
      do {
        int32_t vacc = *((const int32_t*) w);
        w = (const void*) ((uintptr_t) w + sizeof(int32_t));

        const int32_t vi0 = (int32_t) (uint32_t) *i0++;
        const int32_t vk0 = (int32_t) (uint32_t) k[0] - vkernel_zero_point;
        vacc += vi0 * vk0;
        const int32_t vi1 = (int32_t) (uint32_t) *i1++;
        const int32_t vk1 = (int32_t) (uint32_t) k[4] - vkernel_zero_point;
        vacc += vi1 * vk1;
        const int32_t vi2 = (int32_t) (uint32_t) *i2++;
        const int32_t vk2 = (int32_t) (uint32_t) k[8] - vkernel_zero_point;
        vacc += vi2 * vk2;
        const int32_t vi3 = (int32_t) (uint32_t) *i3++;
        const int32_t vk3 = (int32_t) (uint32_t) k[12] - vkernel_zero_point;
        vacc += vi3 * vk3;
        const int32_t vi4 = (int32_t) (uint32_t) *i4++;
        const int32_t vk4 = (int32_t) (uint32_t) k[16] - vkernel_zero_point;
        vacc += vi4 * vk4;
        const int32_t vi5 = (int32_t) (uint32_t) *i5++;
        const int32_t vk5 = (int32_t) (uint32_t) k[20] - vkernel_zero_point;
        vacc += vi5 * vk5;
        const int32_t vi6 = (int32_t) (uint32_t) *i6++;
        const int32_t vk6 = (int32_t) (uint32_t) k[24] - vkernel_zero_point;
        vacc += vi6 * vk6;
        const int32_t vi7 = (int32_t) (uint32_t) *i7++;
        const int32_t vk7 = (int32_t) (uint32_t) k[28] - vkernel_zero_point;
        vacc += vi7 * vk7;
        const int32_t vi8 = (int32_t) (uint32_t) *i8++;
        const int32_t vk8 = (int32_t) (uint32_t) k[32] - vkernel_zero_point;
        vacc += vi8 * vk8;
        k += 1;

        // Same imagic requantization as the main loop, scalar form.
        float vfpacc = (float) vacc * vscale;
        vfpacc += vmagic_bias;
        int32_t vout = (int32_t) float_as_uint32(vfpacc);
        vout = math_max_s32(vout, vmagic_min);
        vout = math_min_s32(vout, vmagic_max);
        vout -= vmagic_bias_less_zero_point;

        *output++ = (uint8_t) vout;
      } while (--c != 0);
    }

    // Skip any inter-row output padding.
    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 14,589
| 43.07855
| 131
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-9p4c-minmax-fp32-scalar-lrintf.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
// QU8 depthwise convolution, 9 taps (e.g. 3x3), unipass, scalar variant.
// Requantization: float scale, clamp, then round-to-nearest via lrintf()
// (rounding mode per current FP environment, normally round-to-nearest-even).
// Weights layout per 4-channel group: 4 x int32 bias, then 9 rows x 4 uint8
// kernel taps. The remainder (< 4 channels) reuses the same layout with a
// per-channel bias read and a stride-4 walk over the kernel taps.
void xnn_qu8_dwconv_minmax_fp32_ukernel_9p4c__scalar_lrintf(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);

  const float vscale = params->fp32_scalar_lrintf.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
  const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
  const int32_t vkernel_zero_point = params->fp32_scalar_lrintf.kernel_zero_point;
  do {
    // Load the 9 input-row pointers for this output pixel. The padding row
    // (== zero) is used verbatim; real rows are rebased by input_offset.
    const uint8_t* i[9];
    for (size_t t = 0; t < 9; t++) {
      const uint8_t* row = input[t];
      assert(row != NULL);
      if XNN_UNPREDICTABLE(row != zero) {
        row = (const uint8_t*) ((uintptr_t) row + input_offset);
      }
      i[t] = row;
    }
    input = (const uint8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
    // Main loop: 4 channels per iteration.
    for (; c >= 4; c -= 4) {
      int32_t vacc[4];
      for (size_t j = 0; j < 4; j++) {
        vacc[j] = ((const int32_t*) w)[j];  // per-channel bias
      }
      // Kernel taps follow the 4 bias words; tap for row t, channel j is k[4*t + j].
      const uint8_t* k = (const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t));
      for (size_t t = 0; t < 9; t++) {
        for (size_t j = 0; j < 4; j++) {
          const int32_t vi = (int32_t) (uint32_t) i[t][j];
          const int32_t vk = (int32_t) (uint32_t) k[4 * t + j] - vkernel_zero_point;
          vacc[j] += vi * vk;
        }
        i[t] += 4;
      }
      w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t) + 36 * sizeof(uint8_t));

      // Requantize: scale, clamp to [min, max] (pre-offset by zero point),
      // round with lrintf, then re-add the output zero point.
      for (size_t j = 0; j < 4; j++) {
        float vfpacc = (float) vacc[j] * vscale;
        vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
        vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
        const int32_t vrndacc = (int32_t) lrintf(vfpacc);
        output[j] = (uint8_t) (vrndacc + voutput_zero_point);
      }
      output += 4;
    }
    // Remainder: up to 3 trailing channels, one at a time.
    if XNN_UNLIKELY(c != 0) {
      const uint8_t* k = (const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t));
      do {
        int32_t vacc = *((const int32_t*) w);
        w = (const void*) ((uintptr_t) w + sizeof(int32_t));
        for (size_t t = 0; t < 9; t++) {
          const int32_t vi = (int32_t) (uint32_t) *i[t]++;
          const int32_t vk = (int32_t) (uint32_t) k[4 * t] - vkernel_zero_point;
          vacc += vi * vk;
        }
        k += 1;

        float vfpacc = (float) vacc * vscale;
        vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
        vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
        const int32_t vrndacc = (int32_t) lrintf(vfpacc);
        *output++ = (uint8_t) (vrndacc + voutput_zero_point);
      } while (--c != 0);
    }

    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 14,728
| 44.32
| 131
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-9p4c-minmax-fp32-wasm-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
// QU8 depthwise convolution, 9 taps (e.g. 3x3), unipass, WebAssembly variant.
// Requantization: float scale, clamp via the wasm min/max builtins, then the
// "magic bias" trick — adding a large constant forces the float into a form
// whose low mantissa bits equal the rounded integer, recovered by bit-casting
// and subtracting (magic_bias_bits - output_zero_point).
// Weights layout per 4-channel group: 4 x int32 bias, then 9 rows x 4 uint8
// kernel taps. The remainder (< 4 channels) reuses the same layout with a
// per-channel bias read and a stride-4 walk over the kernel taps.
void xnn_qu8_dwconv_minmax_fp32_ukernel_9p4c__wasm_fmagic(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);

  const float vscale = params->fp32_scalar_fmagic.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
  const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
  const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
  const int32_t vkernel_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
  do {
    // Load the 9 input-row pointers for this output pixel. The padding row
    // (== zero) is used verbatim; real rows are rebased by input_offset.
    const uint8_t* i[9];
    for (size_t t = 0; t < 9; t++) {
      const uint8_t* row = input[t];
      assert(row != NULL);
      if XNN_UNPREDICTABLE(row != zero) {
        row = (const uint8_t*) ((uintptr_t) row + input_offset);
      }
      i[t] = row;
    }
    input = (const uint8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
    // Main loop: 4 channels per iteration.
    for (; c >= 4; c -= 4) {
      int32_t vacc[4];
      for (size_t j = 0; j < 4; j++) {
        vacc[j] = ((const int32_t*) w)[j];  // per-channel bias
      }
      // Kernel taps follow the 4 bias words; tap for row t, channel j is k[4*t + j].
      const uint8_t* k = (const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t));
      for (size_t t = 0; t < 9; t++) {
        for (size_t j = 0; j < 4; j++) {
          const int32_t vi = (int32_t) (uint32_t) i[t][j];
          const int32_t vk = (int32_t) (uint32_t) k[4 * t + j] - vkernel_zero_point;
          vacc[j] += vi * vk;
        }
        i[t] += 4;
      }
      w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t) + 36 * sizeof(uint8_t));

      // Requantize: scale, clamp (bounds pre-offset by zero point), add the
      // magic bias, and extract the rounded integer from the float bits.
      for (size_t j = 0; j < 4; j++) {
        float vfpacc = (float) vacc[j] * vscale;
        vfpacc = __builtin_wasm_max_f32(vfpacc, voutput_min_less_zero_point);
        vfpacc = __builtin_wasm_min_f32(vfpacc, voutput_max_less_zero_point);
        vfpacc += vmagic_bias;
        const int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
        output[j] = (uint8_t) vout;
      }
      output += 4;
    }
    // Remainder: up to 3 trailing channels, one at a time.
    if XNN_UNLIKELY(c != 0) {
      const uint8_t* k = (const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t));
      do {
        int32_t vacc = *((const int32_t*) w);
        w = (const void*) ((uintptr_t) w + sizeof(int32_t));
        for (size_t t = 0; t < 9; t++) {
          const int32_t vi = (int32_t) (uint32_t) *i[t]++;
          const int32_t vk = (int32_t) (uint32_t) k[4 * t] - vkernel_zero_point;
          vacc += vi * vk;
        }
        k += 1;

        float vfpacc = (float) vacc * vscale;
        vfpacc = __builtin_wasm_max_f32(vfpacc, voutput_min_less_zero_point);
        vfpacc = __builtin_wasm_min_f32(vfpacc, voutput_max_less_zero_point);
        vfpacc += vmagic_bias;
        const int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
        *output++ = (uint8_t) vout;
      } while (--c != 0);
    }

    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 14,938
| 44.966154
| 131
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-9p4c-minmax-rndnu-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
void xnn_qu8_dwconv_minmax_rndnu_ukernel_9p4c__scalar(
size_t channels,
size_t output_width,
const uint8_t** input,
const void* weights,
uint8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(channels != 0);
assert(output_width != 0);
const int32_t vmultiplier = params->rndnu_scalar.multiplier;
const int64_t vrounding = params->rndnu_scalar.rounding;
const uint32_t vshift = params->rndnu_scalar.shift;
const int32_t voutput_min_less_zero_point = params->rndnu_scalar.output_min_less_zero_point;
const int32_t voutput_max_less_zero_point = params->rndnu_scalar.output_max_less_zero_point;
const int32_t voutput_zero_point = params->rndnu_scalar.output_zero_point;
const int32_t vkernel_zero_point = params->rndnu_scalar.kernel_zero_point;
do {
const uint8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
const uint8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
}
const uint8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
}
const uint8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
}
const uint8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const uint8_t*) ((uintptr_t) i8 + input_offset);
}
input = (const uint8_t**) ((uintptr_t) input + input_stride);
size_t c = channels;
const void* w = weights;
for (; c >= 4; c -= 4) {
int32_t vacc0 = ((const int32_t*) w)[0];
int32_t vacc1 = ((const int32_t*) w)[1];
int32_t vacc2 = ((const int32_t*) w)[2];
int32_t vacc3 = ((const int32_t*) w)[3];
const int32_t vi0x0 = (int32_t) (uint32_t) i0[0];
const int32_t vi0x1 = (int32_t) (uint32_t) i0[1];
const int32_t vi0x2 = (int32_t) (uint32_t) i0[2];
const int32_t vi0x3 = (int32_t) (uint32_t) i0[3];
i0 += 4;
const int32_t vk0x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[0] - vkernel_zero_point;
const int32_t vk0x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[1] - vkernel_zero_point;
const int32_t vk0x2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[2] - vkernel_zero_point;
const int32_t vk0x3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[3] - vkernel_zero_point;
vacc0 += vi0x0 * vk0x0;
vacc1 += vi0x1 * vk0x1;
vacc2 += vi0x2 * vk0x2;
vacc3 += vi0x3 * vk0x3;
const int32_t vi1x0 = (int32_t) (uint32_t) i1[0];
const int32_t vi1x1 = (int32_t) (uint32_t) i1[1];
const int32_t vi1x2 = (int32_t) (uint32_t) i1[2];
const int32_t vi1x3 = (int32_t) (uint32_t) i1[3];
i1 += 4;
const int32_t vk1x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[4] - vkernel_zero_point;
const int32_t vk1x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[5] - vkernel_zero_point;
const int32_t vk1x2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[6] - vkernel_zero_point;
const int32_t vk1x3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[7] - vkernel_zero_point;
vacc0 += vi1x0 * vk1x0;
vacc1 += vi1x1 * vk1x1;
vacc2 += vi1x2 * vk1x2;
vacc3 += vi1x3 * vk1x3;
const int32_t vi2x0 = (int32_t) (uint32_t) i2[0];
const int32_t vi2x1 = (int32_t) (uint32_t) i2[1];
const int32_t vi2x2 = (int32_t) (uint32_t) i2[2];
const int32_t vi2x3 = (int32_t) (uint32_t) i2[3];
i2 += 4;
const int32_t vk2x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[8] - vkernel_zero_point;
const int32_t vk2x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[9] - vkernel_zero_point;
const int32_t vk2x2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[10] - vkernel_zero_point;
const int32_t vk2x3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[11] - vkernel_zero_point;
vacc0 += vi2x0 * vk2x0;
vacc1 += vi2x1 * vk2x1;
vacc2 += vi2x2 * vk2x2;
vacc3 += vi2x3 * vk2x3;
const int32_t vi3x0 = (int32_t) (uint32_t) i3[0];
const int32_t vi3x1 = (int32_t) (uint32_t) i3[1];
const int32_t vi3x2 = (int32_t) (uint32_t) i3[2];
const int32_t vi3x3 = (int32_t) (uint32_t) i3[3];
i3 += 4;
const int32_t vk3x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[12] - vkernel_zero_point;
const int32_t vk3x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[13] - vkernel_zero_point;
const int32_t vk3x2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[14] - vkernel_zero_point;
const int32_t vk3x3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[15] - vkernel_zero_point;
vacc0 += vi3x0 * vk3x0;
vacc1 += vi3x1 * vk3x1;
vacc2 += vi3x2 * vk3x2;
vacc3 += vi3x3 * vk3x3;
const int32_t vi4x0 = (int32_t) (uint32_t) i4[0];
const int32_t vi4x1 = (int32_t) (uint32_t) i4[1];
const int32_t vi4x2 = (int32_t) (uint32_t) i4[2];
const int32_t vi4x3 = (int32_t) (uint32_t) i4[3];
i4 += 4;
const int32_t vk4x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[16] - vkernel_zero_point;
const int32_t vk4x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[17] - vkernel_zero_point;
const int32_t vk4x2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[18] - vkernel_zero_point;
const int32_t vk4x3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[19] - vkernel_zero_point;
vacc0 += vi4x0 * vk4x0;
vacc1 += vi4x1 * vk4x1;
vacc2 += vi4x2 * vk4x2;
vacc3 += vi4x3 * vk4x3;
const int32_t vi5x0 = (int32_t) (uint32_t) i5[0];
const int32_t vi5x1 = (int32_t) (uint32_t) i5[1];
const int32_t vi5x2 = (int32_t) (uint32_t) i5[2];
const int32_t vi5x3 = (int32_t) (uint32_t) i5[3];
i5 += 4;
const int32_t vk5x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[20] - vkernel_zero_point;
const int32_t vk5x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[21] - vkernel_zero_point;
const int32_t vk5x2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[22] - vkernel_zero_point;
const int32_t vk5x3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[23] - vkernel_zero_point;
vacc0 += vi5x0 * vk5x0;
vacc1 += vi5x1 * vk5x1;
vacc2 += vi5x2 * vk5x2;
vacc3 += vi5x3 * vk5x3;
const int32_t vi6x0 = (int32_t) (uint32_t) i6[0];
const int32_t vi6x1 = (int32_t) (uint32_t) i6[1];
const int32_t vi6x2 = (int32_t) (uint32_t) i6[2];
const int32_t vi6x3 = (int32_t) (uint32_t) i6[3];
i6 += 4;
const int32_t vk6x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[24] - vkernel_zero_point;
const int32_t vk6x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[25] - vkernel_zero_point;
const int32_t vk6x2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[26] - vkernel_zero_point;
const int32_t vk6x3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[27] - vkernel_zero_point;
vacc0 += vi6x0 * vk6x0;
vacc1 += vi6x1 * vk6x1;
vacc2 += vi6x2 * vk6x2;
vacc3 += vi6x3 * vk6x3;
const int32_t vi7x0 = (int32_t) (uint32_t) i7[0];
const int32_t vi7x1 = (int32_t) (uint32_t) i7[1];
const int32_t vi7x2 = (int32_t) (uint32_t) i7[2];
const int32_t vi7x3 = (int32_t) (uint32_t) i7[3];
i7 += 4;
const int32_t vk7x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[28] - vkernel_zero_point;
const int32_t vk7x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[29] - vkernel_zero_point;
const int32_t vk7x2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[30] - vkernel_zero_point;
const int32_t vk7x3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[31] - vkernel_zero_point;
vacc0 += vi7x0 * vk7x0;
vacc1 += vi7x1 * vk7x1;
vacc2 += vi7x2 * vk7x2;
vacc3 += vi7x3 * vk7x3;
const int32_t vi8x0 = (int32_t) (uint32_t) i8[0];
const int32_t vi8x1 = (int32_t) (uint32_t) i8[1];
const int32_t vi8x2 = (int32_t) (uint32_t) i8[2];
const int32_t vi8x3 = (int32_t) (uint32_t) i8[3];
i8 += 4;
const int32_t vk8x0 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[32] - vkernel_zero_point;
const int32_t vk8x1 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[33] - vkernel_zero_point;
const int32_t vk8x2 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[34] - vkernel_zero_point;
const int32_t vk8x3 = (int32_t) (uint32_t) ((const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t)))[35] - vkernel_zero_point;
vacc0 += vi8x0 * vk8x0;
vacc1 += vi8x1 * vk8x1;
vacc2 += vi8x2 * vk8x2;
vacc3 += vi8x3 * vk8x3;
w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t) + 36 * sizeof(uint8_t));
const int64_t vextacc0 = math_mulext_s32(vacc0, vmultiplier) + vrounding;
const int64_t vextacc1 = math_mulext_s32(vacc1, vmultiplier) + vrounding;
const int64_t vextacc2 = math_mulext_s32(vacc2, vmultiplier) + vrounding;
const int64_t vextacc3 = math_mulext_s32(vacc3, vmultiplier) + vrounding;
int32_t vout0 = (int32_t) math_asr_s64(vextacc0, vshift);
int32_t vout1 = (int32_t) math_asr_s64(vextacc1, vshift);
int32_t vout2 = (int32_t) math_asr_s64(vextacc2, vshift);
int32_t vout3 = (int32_t) math_asr_s64(vextacc3, vshift);
vout0 = math_max_s32(vout0, voutput_min_less_zero_point);
vout1 = math_max_s32(vout1, voutput_min_less_zero_point);
vout2 = math_max_s32(vout2, voutput_min_less_zero_point);
vout3 = math_max_s32(vout3, voutput_min_less_zero_point);
vout0 = math_min_s32(vout0, voutput_max_less_zero_point);
vout1 = math_min_s32(vout1, voutput_max_less_zero_point);
vout2 = math_min_s32(vout2, voutput_max_less_zero_point);
vout3 = math_min_s32(vout3, voutput_max_less_zero_point);
vout0 += voutput_zero_point;
vout1 += voutput_zero_point;
vout2 += voutput_zero_point;
vout3 += voutput_zero_point;
output[0] = (uint8_t) vout0;
output[1] = (uint8_t) vout1;
output[2] = (uint8_t) vout2;
output[3] = (uint8_t) vout3;
output += 4;
}
if XNN_UNLIKELY(c != 0) {
const uint8_t* k = (const uint8_t*) ((uintptr_t) w + 4 * sizeof(int32_t));
do {
int32_t vacc = *((const int32_t*) w);
w = (const void*) ((uintptr_t) w + sizeof(int32_t));
const int32_t vi0 = (int32_t) (uint32_t) *i0++;
const int32_t vk0 = (int32_t) (uint32_t) k[0] - vkernel_zero_point;
vacc += vi0 * vk0;
const int32_t vi1 = (int32_t) (uint32_t) *i1++;
const int32_t vk1 = (int32_t) (uint32_t) k[4] - vkernel_zero_point;
vacc += vi1 * vk1;
const int32_t vi2 = (int32_t) (uint32_t) *i2++;
const int32_t vk2 = (int32_t) (uint32_t) k[8] - vkernel_zero_point;
vacc += vi2 * vk2;
const int32_t vi3 = (int32_t) (uint32_t) *i3++;
const int32_t vk3 = (int32_t) (uint32_t) k[12] - vkernel_zero_point;
vacc += vi3 * vk3;
const int32_t vi4 = (int32_t) (uint32_t) *i4++;
const int32_t vk4 = (int32_t) (uint32_t) k[16] - vkernel_zero_point;
vacc += vi4 * vk4;
const int32_t vi5 = (int32_t) (uint32_t) *i5++;
const int32_t vk5 = (int32_t) (uint32_t) k[20] - vkernel_zero_point;
vacc += vi5 * vk5;
const int32_t vi6 = (int32_t) (uint32_t) *i6++;
const int32_t vk6 = (int32_t) (uint32_t) k[24] - vkernel_zero_point;
vacc += vi6 * vk6;
const int32_t vi7 = (int32_t) (uint32_t) *i7++;
const int32_t vk7 = (int32_t) (uint32_t) k[28] - vkernel_zero_point;
vacc += vi7 * vk7;
const int32_t vi8 = (int32_t) (uint32_t) *i8++;
const int32_t vk8 = (int32_t) (uint32_t) k[32] - vkernel_zero_point;
vacc += vi8 * vk8;
k += 1;
const int64_t vextacc = math_mulext_s32(vacc, vmultiplier) + vrounding;
int32_t vout = (int32_t) math_asr_s64(vextacc, vshift);
vout = math_max_s32(vout, voutput_min_less_zero_point);
vout = math_min_s32(vout, voutput_max_less_zero_point);
vout += voutput_zero_point;
*output++ = (uint8_t) vout;
} while (--c != 0);
}
output = (uint8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 14,768
| 45.153125
| 131
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-9p8c-minmax-fp32-avx-mul16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-sse-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/unaligned.h>
// QU8 depthwise convolution microkernel: 9-tap unipass ("9p"), 8 channels per
// iteration ("8c"), fp32 requantization, AVX build of the SSE4.1 mul16 scheme.
// For each output pixel it reads 9 input-row pointers from `input`, multiplies
// 8 channels at a time against (kernel - kernel_zero_point) in 16-bit lanes,
// accumulates into 32-bit lanes, then requantizes via float scale + clamp.
//
// channels         - number of channels (> 0)
// output_width     - number of output pixels to produce (> 0)
// input            - indirection buffer: 9 row pointers per output pixel
// weights          - packed weights: per 8-channel group, 8 int32 biases
//                    followed by 9*8 uint8 kernel taps
// output           - destination, `channels` uint8 values per output pixel
// input_stride     - bytes to advance the indirection buffer per output pixel
// output_increment - bytes to add to `output` after each pixel's channels
// input_offset     - byte offset applied to every non-`zero` input pointer
// zero             - pointer to a zero buffer used for implicit padding rows
// params           - quantization parameters (fp32_sse2 layout)
void xnn_qu8_dwconv_minmax_fp32_ukernel_9p8c__avx_mul16(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);
  do {
    // Load the 9 row pointers for this output pixel. Rows that point at the
    // shared `zero` padding buffer must NOT be offset; real rows are shifted
    // by input_offset.
    const uint8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
    }
    const uint8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
    }
    const uint8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
    }
    const uint8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
    }
    const uint8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
    }
    const uint8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
    }
    const uint8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
    }
    const uint8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
    }
    const uint8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const uint8_t*) ((uintptr_t) i8 + input_offset);
    }
    input = (const uint8_t**) ((uintptr_t) input + input_stride);
    size_t c = channels;
    const void* w = weights;
    const __m128i vk_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
    // Main loop: 8 channels per iteration.
    for (; c >= 8; c -= 8) {
      // Initialize accumulators with the 8 int32 biases at the head of this
      // weight group.
      __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
      __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));

      // Tap 0: widen 8 input bytes and 8 kernel bytes to 16-bit, subtract the
      // kernel zero point, then form the full 16x16->32 products from the
      // mullo/mulhi halves and add them into the 32-bit accumulators.
      const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
      const __m128i vxi0x01234567 = _mm_cvtepu8_epi16(vi0x01234567);
      const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(uint8_t)));
      const __m128i vxk0x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk0x01234567), vk_zero_point);
      i0 += 8;

      const __m128i vprod0x01234567lo = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
      const __m128i vprod0x01234567hi = _mm_mulhi_epi16(vxi0x01234567, vxk0x01234567);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod0x01234567lo, vprod0x01234567hi));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod0x01234567lo, vprod0x01234567hi));

      // Tap 1.
      const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
      const __m128i vxi1x01234567 = _mm_cvtepu8_epi16(vi1x01234567);
      const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(uint8_t)));
      const __m128i vxk1x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk1x01234567), vk_zero_point);
      i1 += 8;

      const __m128i vprod1x01234567lo = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
      const __m128i vprod1x01234567hi = _mm_mulhi_epi16(vxi1x01234567, vxk1x01234567);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod1x01234567lo, vprod1x01234567hi));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod1x01234567lo, vprod1x01234567hi));

      // Tap 2.
      const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
      const __m128i vxi2x01234567 = _mm_cvtepu8_epi16(vi2x01234567);
      const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(uint8_t)));
      const __m128i vxk2x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk2x01234567), vk_zero_point);
      i2 += 8;

      const __m128i vprod2x01234567lo = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
      const __m128i vprod2x01234567hi = _mm_mulhi_epi16(vxi2x01234567, vxk2x01234567);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod2x01234567lo, vprod2x01234567hi));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod2x01234567lo, vprod2x01234567hi));

      // Tap 3.
      const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
      const __m128i vxi3x01234567 = _mm_cvtepu8_epi16(vi3x01234567);
      const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(uint8_t)));
      const __m128i vxk3x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk3x01234567), vk_zero_point);
      i3 += 8;

      const __m128i vprod3x01234567lo = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
      const __m128i vprod3x01234567hi = _mm_mulhi_epi16(vxi3x01234567, vxk3x01234567);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod3x01234567lo, vprod3x01234567hi));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod3x01234567lo, vprod3x01234567hi));

      // Tap 4.
      const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
      const __m128i vxi4x01234567 = _mm_cvtepu8_epi16(vi4x01234567);
      const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(uint8_t)));
      const __m128i vxk4x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk4x01234567), vk_zero_point);
      i4 += 8;

      const __m128i vprod4x01234567lo = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
      const __m128i vprod4x01234567hi = _mm_mulhi_epi16(vxi4x01234567, vxk4x01234567);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod4x01234567lo, vprod4x01234567hi));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod4x01234567lo, vprod4x01234567hi));

      // Tap 5.
      const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
      const __m128i vxi5x01234567 = _mm_cvtepu8_epi16(vi5x01234567);
      const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(uint8_t)));
      const __m128i vxk5x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk5x01234567), vk_zero_point);
      i5 += 8;

      const __m128i vprod5x01234567lo = _mm_mullo_epi16(vxi5x01234567, vxk5x01234567);
      const __m128i vprod5x01234567hi = _mm_mulhi_epi16(vxi5x01234567, vxk5x01234567);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod5x01234567lo, vprod5x01234567hi));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod5x01234567lo, vprod5x01234567hi));

      // Tap 6.
      const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
      const __m128i vxi6x01234567 = _mm_cvtepu8_epi16(vi6x01234567);
      const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 48 * sizeof(uint8_t)));
      const __m128i vxk6x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk6x01234567), vk_zero_point);
      i6 += 8;

      const __m128i vprod6x01234567lo = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);
      const __m128i vprod6x01234567hi = _mm_mulhi_epi16(vxi6x01234567, vxk6x01234567);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod6x01234567lo, vprod6x01234567hi));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod6x01234567lo, vprod6x01234567hi));

      // Tap 7.
      const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
      const __m128i vxi7x01234567 = _mm_cvtepu8_epi16(vi7x01234567);
      const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 56 * sizeof(uint8_t)));
      const __m128i vxk7x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk7x01234567), vk_zero_point);
      i7 += 8;

      const __m128i vprod7x01234567lo = _mm_mullo_epi16(vxi7x01234567, vxk7x01234567);
      const __m128i vprod7x01234567hi = _mm_mulhi_epi16(vxi7x01234567, vxk7x01234567);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod7x01234567lo, vprod7x01234567hi));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod7x01234567lo, vprod7x01234567hi));

      // Tap 8.
      const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
      const __m128i vxi8x01234567 = _mm_cvtepu8_epi16(vi8x01234567);
      const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 64 * sizeof(uint8_t)));
      const __m128i vxk8x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk8x01234567), vk_zero_point);
      i8 += 8;

      const __m128i vprod8x01234567lo = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);
      const __m128i vprod8x01234567hi = _mm_mulhi_epi16(vxi8x01234567, vxk8x01234567);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod8x01234567lo, vprod8x01234567hi));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod8x01234567lo, vprod8x01234567hi));

      // Advance past this group's 8 biases + 9*8 kernel bytes.
      w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 72 * sizeof(uint8_t));

      // fp32 requantization: scale in float, clamp the upper bound in float
      // (lower bound is handled after packing via output_min).
      __m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
      __m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);

      const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
      vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
      vscaled4567 = _mm_mul_ps(vscaled4567, vscale);

      const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
      vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
      vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);

      vacc0123 = _mm_cvtps_epi32(vscaled0123);
      vacc4567 = _mm_cvtps_epi32(vscaled4567);

      // Pack to int16 with saturation, add the output zero point, then pack
      // to uint8 and apply the lower clamp.
      const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
      __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);

      __m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);

      const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
      vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);

      _mm_storel_epi64((__m128i*) output, vout0123456701234567);
      output += 8;
    }
    // Remainder: 1-7 trailing channels. Same computation as the main loop
    // (XNN_OOB_READS permits reading the full 8-lane group), but pointers are
    // not advanced and the store is broken into 4/2/1-byte pieces.
    if XNN_UNLIKELY(c != 0) {
      {
        __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
        __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));

        // Tap 0.
        const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
        const __m128i vxi0x01234567 = _mm_cvtepu8_epi16(vi0x01234567);
        const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(uint8_t)));
        const __m128i vxk0x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk0x01234567), vk_zero_point);

        const __m128i vprod0x01234567lo = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
        const __m128i vprod0x01234567hi = _mm_mulhi_epi16(vxi0x01234567, vxk0x01234567);

        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod0x01234567lo, vprod0x01234567hi));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod0x01234567lo, vprod0x01234567hi));

        // Tap 1.
        const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
        const __m128i vxi1x01234567 = _mm_cvtepu8_epi16(vi1x01234567);
        const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(uint8_t)));
        const __m128i vxk1x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk1x01234567), vk_zero_point);

        const __m128i vprod1x01234567lo = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
        const __m128i vprod1x01234567hi = _mm_mulhi_epi16(vxi1x01234567, vxk1x01234567);

        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod1x01234567lo, vprod1x01234567hi));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod1x01234567lo, vprod1x01234567hi));

        // Tap 2.
        const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
        const __m128i vxi2x01234567 = _mm_cvtepu8_epi16(vi2x01234567);
        const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(uint8_t)));
        const __m128i vxk2x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk2x01234567), vk_zero_point);

        const __m128i vprod2x01234567lo = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
        const __m128i vprod2x01234567hi = _mm_mulhi_epi16(vxi2x01234567, vxk2x01234567);

        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod2x01234567lo, vprod2x01234567hi));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod2x01234567lo, vprod2x01234567hi));

        // Tap 3.
        const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
        const __m128i vxi3x01234567 = _mm_cvtepu8_epi16(vi3x01234567);
        const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(uint8_t)));
        const __m128i vxk3x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk3x01234567), vk_zero_point);

        const __m128i vprod3x01234567lo = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
        const __m128i vprod3x01234567hi = _mm_mulhi_epi16(vxi3x01234567, vxk3x01234567);

        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod3x01234567lo, vprod3x01234567hi));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod3x01234567lo, vprod3x01234567hi));

        // Tap 4.
        const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
        const __m128i vxi4x01234567 = _mm_cvtepu8_epi16(vi4x01234567);
        const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(uint8_t)));
        const __m128i vxk4x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk4x01234567), vk_zero_point);

        const __m128i vprod4x01234567lo = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
        const __m128i vprod4x01234567hi = _mm_mulhi_epi16(vxi4x01234567, vxk4x01234567);

        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod4x01234567lo, vprod4x01234567hi));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod4x01234567lo, vprod4x01234567hi));

        // Tap 5.
        const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
        const __m128i vxi5x01234567 = _mm_cvtepu8_epi16(vi5x01234567);
        const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(uint8_t)));
        const __m128i vxk5x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk5x01234567), vk_zero_point);

        const __m128i vprod5x01234567lo = _mm_mullo_epi16(vxi5x01234567, vxk5x01234567);
        const __m128i vprod5x01234567hi = _mm_mulhi_epi16(vxi5x01234567, vxk5x01234567);

        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod5x01234567lo, vprod5x01234567hi));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod5x01234567lo, vprod5x01234567hi));

        // Tap 6.
        const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
        const __m128i vxi6x01234567 = _mm_cvtepu8_epi16(vi6x01234567);
        const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 48 * sizeof(uint8_t)));
        const __m128i vxk6x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk6x01234567), vk_zero_point);

        const __m128i vprod6x01234567lo = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);
        const __m128i vprod6x01234567hi = _mm_mulhi_epi16(vxi6x01234567, vxk6x01234567);

        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod6x01234567lo, vprod6x01234567hi));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod6x01234567lo, vprod6x01234567hi));

        // Tap 7.
        const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
        const __m128i vxi7x01234567 = _mm_cvtepu8_epi16(vi7x01234567);
        const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 56 * sizeof(uint8_t)));
        const __m128i vxk7x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk7x01234567), vk_zero_point);

        const __m128i vprod7x01234567lo = _mm_mullo_epi16(vxi7x01234567, vxk7x01234567);
        const __m128i vprod7x01234567hi = _mm_mulhi_epi16(vxi7x01234567, vxk7x01234567);

        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod7x01234567lo, vprod7x01234567hi));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod7x01234567lo, vprod7x01234567hi));

        // Tap 8.
        const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
        const __m128i vxi8x01234567 = _mm_cvtepu8_epi16(vi8x01234567);
        const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 64 * sizeof(uint8_t)));
        const __m128i vxk8x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk8x01234567), vk_zero_point);

        const __m128i vprod8x01234567lo = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);
        const __m128i vprod8x01234567hi = _mm_mulhi_epi16(vxi8x01234567, vxk8x01234567);

        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod8x01234567lo, vprod8x01234567hi));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod8x01234567lo, vprod8x01234567hi));

        // Requantize exactly as in the main loop.
        __m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
        __m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);

        const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
        vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
        vscaled4567 = _mm_mul_ps(vscaled4567, vscale);

        const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
        vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
        vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);

        vacc0123 = _mm_cvtps_epi32(vscaled0123);
        vacc4567 = _mm_cvtps_epi32(vscaled4567);

        const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
        __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);

        __m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);

        vout0123456701234567 = _mm_max_epu8(vout0123456701234567, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));

        // Store the low `c` bytes: 4, then 2, then 1, shifting consumed bytes
        // out of the vector between stores.
        if (c & 4) {
          unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
          vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
          output += 4;
        }
        if (c & 2) {
          unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
          vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
          output += 2;
        }
        if (c & 1) {
          *output = (uint8_t) _mm_extract_epi8(vout0123456701234567, 0);
          output += 1;
        }
      }
    }

    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 19,739
| 49.615385
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-9p8c-minmax-fp32-sse2-mul16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-sse-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_dwconv_minmax_fp32_ukernel_9p8c__sse2_mul16(
size_t channels,
size_t output_width,
const uint8_t** input,
const void* weights,
uint8_t* output,
intptr_t input_stride,
size_t output_increment,
size_t input_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(channels != 0);
assert(output_width != 0);
do {
const uint8_t* i0 = input[0];
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = input[1];
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = input[2];
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = input[3];
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = input[4];
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
const uint8_t* i5 = input[5];
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
}
const uint8_t* i6 = input[6];
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
}
const uint8_t* i7 = input[7];
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
}
const uint8_t* i8 = input[8];
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const uint8_t*) ((uintptr_t) i8 + input_offset);
}
input = (const uint8_t**) ((uintptr_t) input + input_stride);
size_t c = channels;
const void* w = weights;
const __m128i vk_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
for (; c >= 8; c -= 8) {
__m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(uint8_t)));
i0 += 8;
const __m128i vzero = _mm_setzero_si128();
const __m128i vxi0x01234567 = _mm_unpacklo_epi8(vi0x01234567, vzero);
const __m128i vxk0x01234567 = _mm_sub_epi16(_mm_unpacklo_epi8(vk0x01234567, vzero), vk_zero_point);
const __m128i vprod0x01234567lo = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
const __m128i vprod0x01234567hi = _mm_mulhi_epi16(vxi0x01234567, vxk0x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod0x01234567lo, vprod0x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod0x01234567lo, vprod0x01234567hi));
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(uint8_t)));
i1 += 8;
const __m128i vxi1x01234567 = _mm_unpacklo_epi8(vi1x01234567, vzero);
const __m128i vxk1x01234567 = _mm_sub_epi16(_mm_unpacklo_epi8(vk1x01234567, vzero), vk_zero_point);
const __m128i vprod1x01234567lo = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
const __m128i vprod1x01234567hi = _mm_mulhi_epi16(vxi1x01234567, vxk1x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod1x01234567lo, vprod1x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod1x01234567lo, vprod1x01234567hi));
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(uint8_t)));
i2 += 8;
const __m128i vxi2x01234567 = _mm_unpacklo_epi8(vi2x01234567, vzero);
const __m128i vxk2x01234567 = _mm_sub_epi16(_mm_unpacklo_epi8(vk2x01234567, vzero), vk_zero_point);
const __m128i vprod2x01234567lo = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
const __m128i vprod2x01234567hi = _mm_mulhi_epi16(vxi2x01234567, vxk2x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod2x01234567lo, vprod2x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod2x01234567lo, vprod2x01234567hi));
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(uint8_t)));
i3 += 8;
const __m128i vxi3x01234567 = _mm_unpacklo_epi8(vi3x01234567, vzero);
const __m128i vxk3x01234567 = _mm_sub_epi16(_mm_unpacklo_epi8(vk3x01234567, vzero), vk_zero_point);
const __m128i vprod3x01234567lo = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
const __m128i vprod3x01234567hi = _mm_mulhi_epi16(vxi3x01234567, vxk3x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod3x01234567lo, vprod3x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod3x01234567lo, vprod3x01234567hi));
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(uint8_t)));
i4 += 8;
const __m128i vxi4x01234567 = _mm_unpacklo_epi8(vi4x01234567, vzero);
const __m128i vxk4x01234567 = _mm_sub_epi16(_mm_unpacklo_epi8(vk4x01234567, vzero), vk_zero_point);
const __m128i vprod4x01234567lo = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
const __m128i vprod4x01234567hi = _mm_mulhi_epi16(vxi4x01234567, vxk4x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod4x01234567lo, vprod4x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod4x01234567lo, vprod4x01234567hi));
const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(uint8_t)));
i5 += 8;
const __m128i vxi5x01234567 = _mm_unpacklo_epi8(vi5x01234567, vzero);
const __m128i vxk5x01234567 = _mm_sub_epi16(_mm_unpacklo_epi8(vk5x01234567, vzero), vk_zero_point);
const __m128i vprod5x01234567lo = _mm_mullo_epi16(vxi5x01234567, vxk5x01234567);
const __m128i vprod5x01234567hi = _mm_mulhi_epi16(vxi5x01234567, vxk5x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod5x01234567lo, vprod5x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod5x01234567lo, vprod5x01234567hi));
const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 48 * sizeof(uint8_t)));
i6 += 8;
const __m128i vxi6x01234567 = _mm_unpacklo_epi8(vi6x01234567, vzero);
const __m128i vxk6x01234567 = _mm_sub_epi16(_mm_unpacklo_epi8(vk6x01234567, vzero), vk_zero_point);
const __m128i vprod6x01234567lo = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);
const __m128i vprod6x01234567hi = _mm_mulhi_epi16(vxi6x01234567, vxk6x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod6x01234567lo, vprod6x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod6x01234567lo, vprod6x01234567hi));
const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 56 * sizeof(uint8_t)));
i7 += 8;
const __m128i vxi7x01234567 = _mm_unpacklo_epi8(vi7x01234567, vzero);
const __m128i vxk7x01234567 = _mm_sub_epi16(_mm_unpacklo_epi8(vk7x01234567, vzero), vk_zero_point);
const __m128i vprod7x01234567lo = _mm_mullo_epi16(vxi7x01234567, vxk7x01234567);
const __m128i vprod7x01234567hi = _mm_mulhi_epi16(vxi7x01234567, vxk7x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod7x01234567lo, vprod7x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod7x01234567lo, vprod7x01234567hi));
const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 64 * sizeof(uint8_t)));
i8 += 8;
const __m128i vxi8x01234567 = _mm_unpacklo_epi8(vi8x01234567, vzero);
const __m128i vxk8x01234567 = _mm_sub_epi16(_mm_unpacklo_epi8(vk8x01234567, vzero), vk_zero_point);
const __m128i vprod8x01234567lo = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);
const __m128i vprod8x01234567hi = _mm_mulhi_epi16(vxi8x01234567, vxk8x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod8x01234567lo, vprod8x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod8x01234567lo, vprod8x01234567hi));
w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 72 * sizeof(uint8_t));
__m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
vscaled4567 = _mm_mul_ps(vscaled4567, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vscaled0123);
vacc4567 = _mm_cvtps_epi32(vscaled4567);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
}
if XNN_UNLIKELY(c != 0) {
{
__m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(uint8_t)));
const __m128i vzero = _mm_setzero_si128();
const __m128i vxi0x01234567 = _mm_unpacklo_epi8(vi0x01234567, vzero);
const __m128i vxk0x01234567 = _mm_sub_epi16(_mm_unpacklo_epi8(vk0x01234567, vzero), vk_zero_point);
const __m128i vprod0x01234567lo = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
const __m128i vprod0x01234567hi = _mm_mulhi_epi16(vxi0x01234567, vxk0x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod0x01234567lo, vprod0x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod0x01234567lo, vprod0x01234567hi));
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(uint8_t)));
const __m128i vxi1x01234567 = _mm_unpacklo_epi8(vi1x01234567, vzero);
const __m128i vxk1x01234567 = _mm_sub_epi16(_mm_unpacklo_epi8(vk1x01234567, vzero), vk_zero_point);
const __m128i vprod1x01234567lo = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
const __m128i vprod1x01234567hi = _mm_mulhi_epi16(vxi1x01234567, vxk1x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod1x01234567lo, vprod1x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod1x01234567lo, vprod1x01234567hi));
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(uint8_t)));
const __m128i vxi2x01234567 = _mm_unpacklo_epi8(vi2x01234567, vzero);
const __m128i vxk2x01234567 = _mm_sub_epi16(_mm_unpacklo_epi8(vk2x01234567, vzero), vk_zero_point);
const __m128i vprod2x01234567lo = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
const __m128i vprod2x01234567hi = _mm_mulhi_epi16(vxi2x01234567, vxk2x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod2x01234567lo, vprod2x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod2x01234567lo, vprod2x01234567hi));
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(uint8_t)));
const __m128i vxi3x01234567 = _mm_unpacklo_epi8(vi3x01234567, vzero);
const __m128i vxk3x01234567 = _mm_sub_epi16(_mm_unpacklo_epi8(vk3x01234567, vzero), vk_zero_point);
const __m128i vprod3x01234567lo = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
const __m128i vprod3x01234567hi = _mm_mulhi_epi16(vxi3x01234567, vxk3x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod3x01234567lo, vprod3x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod3x01234567lo, vprod3x01234567hi));
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(uint8_t)));
const __m128i vxi4x01234567 = _mm_unpacklo_epi8(vi4x01234567, vzero);
const __m128i vxk4x01234567 = _mm_sub_epi16(_mm_unpacklo_epi8(vk4x01234567, vzero), vk_zero_point);
const __m128i vprod4x01234567lo = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
const __m128i vprod4x01234567hi = _mm_mulhi_epi16(vxi4x01234567, vxk4x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod4x01234567lo, vprod4x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod4x01234567lo, vprod4x01234567hi));
const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(uint8_t)));
const __m128i vxi5x01234567 = _mm_unpacklo_epi8(vi5x01234567, vzero);
const __m128i vxk5x01234567 = _mm_sub_epi16(_mm_unpacklo_epi8(vk5x01234567, vzero), vk_zero_point);
const __m128i vprod5x01234567lo = _mm_mullo_epi16(vxi5x01234567, vxk5x01234567);
const __m128i vprod5x01234567hi = _mm_mulhi_epi16(vxi5x01234567, vxk5x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod5x01234567lo, vprod5x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod5x01234567lo, vprod5x01234567hi));
const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 48 * sizeof(uint8_t)));
const __m128i vxi6x01234567 = _mm_unpacklo_epi8(vi6x01234567, vzero);
const __m128i vxk6x01234567 = _mm_sub_epi16(_mm_unpacklo_epi8(vk6x01234567, vzero), vk_zero_point);
const __m128i vprod6x01234567lo = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);
const __m128i vprod6x01234567hi = _mm_mulhi_epi16(vxi6x01234567, vxk6x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod6x01234567lo, vprod6x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod6x01234567lo, vprod6x01234567hi));
const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 56 * sizeof(uint8_t)));
const __m128i vxi7x01234567 = _mm_unpacklo_epi8(vi7x01234567, vzero);
const __m128i vxk7x01234567 = _mm_sub_epi16(_mm_unpacklo_epi8(vk7x01234567, vzero), vk_zero_point);
const __m128i vprod7x01234567lo = _mm_mullo_epi16(vxi7x01234567, vxk7x01234567);
const __m128i vprod7x01234567hi = _mm_mulhi_epi16(vxi7x01234567, vxk7x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod7x01234567lo, vprod7x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod7x01234567lo, vprod7x01234567hi));
const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 64 * sizeof(uint8_t)));
const __m128i vxi8x01234567 = _mm_unpacklo_epi8(vi8x01234567, vzero);
const __m128i vxk8x01234567 = _mm_sub_epi16(_mm_unpacklo_epi8(vk8x01234567, vzero), vk_zero_point);
const __m128i vprod8x01234567lo = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);
const __m128i vprod8x01234567hi = _mm_mulhi_epi16(vxi8x01234567, vxk8x01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod8x01234567lo, vprod8x01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod8x01234567lo, vprod8x01234567hi));
__m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
vscaled4567 = _mm_mul_ps(vscaled4567, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vscaled0123);
vacc4567 = _mm_cvtps_epi32(vscaled4567);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (c & 4) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (c & 2) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (c & 1) {
*output = (uint8_t) _mm_cvtsi128_si32(vout0123456701234567);
output += 1;
}
}
}
output = (uint8_t*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
| 20,090
| 50.252551
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-dwconv/gen/qu8-dwconv-9p8c-minmax-fp32-sse41-mul16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-sse-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/unaligned.h>
// Depthwise convolution microkernel: 9-tap unipass ("9p"), 8 channels per
// vector iteration ("8c"), uint8 inputs/outputs with fp32 requantization,
// implemented with SSE4.1 using 16-bit multiplies ("mul16").
//
// channels         - number of channels (depthwise: one filter tap set per channel)
// output_width     - number of output pixels to produce
// input            - per-pixel array of 9 input row pointers (one per kernel tap)
// weights          - packed groups of [8 x int32 bias][9 taps x 8 x uint8 kernel]
//                    (layout established by the byte offsets used below)
// output           - destination for uint8 output values
// input_stride     - byte stride to advance the `input` pointer array per pixel
// output_increment - bytes added to `output` after each output pixel
// input_offset     - byte offset applied to every input pointer except `zero`
// zero             - pointer to a zero-filled row used for padding taps
// params           - quantization parameters (kernel zero point, fp32 scale,
//                    output zero point, output min/max)
void xnn_qu8_dwconv_minmax_fp32_ukernel_9p8c__sse41_mul16(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);
  do {
    // Fetch the 9 row pointers for this output pixel. Pointers equal to the
    // shared `zero` (padding) row do not receive the input offset.
    const uint8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
    }
    const uint8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
    }
    const uint8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
    }
    const uint8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
    }
    const uint8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
    }
    const uint8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
    }
    const uint8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
    }
    const uint8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
    }
    const uint8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const uint8_t*) ((uintptr_t) i8 + input_offset);
    }
    input = (const uint8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
    const __m128i vk_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
    // Main loop: 8 channels at a time.
    for (; c >= 8; c -= 8) {
      // Start from the packed int32 biases (2 vectors of 4 lanes each).
      __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
      __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));

      // Tap 0: widen 8 input bytes and 8 kernel bytes to int16, subtract the
      // kernel zero point, then form the full 32-bit products by interleaving
      // the low (mullo) and high (mulhi) halves of the 16-bit multiply.
      const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
      const __m128i vxi0x01234567 = _mm_cvtepu8_epi16(vi0x01234567);
      const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(uint8_t)));
      const __m128i vxk0x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk0x01234567), vk_zero_point);
      i0 += 8;

      const __m128i vprod0x01234567lo = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
      const __m128i vprod0x01234567hi = _mm_mulhi_epi16(vxi0x01234567, vxk0x01234567);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod0x01234567lo, vprod0x01234567hi));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod0x01234567lo, vprod0x01234567hi));

      // Taps 1..8: identical multiply-accumulate pattern, kernel bytes read at
      // successive 8-byte offsets past the bias block.
      const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
      const __m128i vxi1x01234567 = _mm_cvtepu8_epi16(vi1x01234567)
;
      const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(uint8_t)));
      const __m128i vxk1x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk1x01234567), vk_zero_point);
      i1 += 8;

      const __m128i vprod1x01234567lo = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
      const __m128i vprod1x01234567hi = _mm_mulhi_epi16(vxi1x01234567, vxk1x01234567);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod1x01234567lo, vprod1x01234567hi));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod1x01234567lo, vprod1x01234567hi));

      const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
      const __m128i vxi2x01234567 = _mm_cvtepu8_epi16(vi2x01234567);
      const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(uint8_t)));
      const __m128i vxk2x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk2x01234567), vk_zero_point);
      i2 += 8;

      const __m128i vprod2x01234567lo = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
      const __m128i vprod2x01234567hi = _mm_mulhi_epi16(vxi2x01234567, vxk2x01234567);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod2x01234567lo, vprod2x01234567hi));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod2x01234567lo, vprod2x01234567hi));

      const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
      const __m128i vxi3x01234567 = _mm_cvtepu8_epi16(vi3x01234567);
      const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(uint8_t)));
      const __m128i vxk3x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk3x01234567), vk_zero_point);
      i3 += 8;

      const __m128i vprod3x01234567lo = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
      const __m128i vprod3x01234567hi = _mm_mulhi_epi16(vxi3x01234567, vxk3x01234567);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod3x01234567lo, vprod3x01234567hi));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod3x01234567lo, vprod3x01234567hi));

      const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
      const __m128i vxi4x01234567 = _mm_cvtepu8_epi16(vi4x01234567);
      const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(uint8_t)));
      const __m128i vxk4x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk4x01234567), vk_zero_point);
      i4 += 8;

      const __m128i vprod4x01234567lo = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
      const __m128i vprod4x01234567hi = _mm_mulhi_epi16(vxi4x01234567, vxk4x01234567);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod4x01234567lo, vprod4x01234567hi));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod4x01234567lo, vprod4x01234567hi));

      const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
      const __m128i vxi5x01234567 = _mm_cvtepu8_epi16(vi5x01234567);
      const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(uint8_t)));
      const __m128i vxk5x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk5x01234567), vk_zero_point);
      i5 += 8;

      const __m128i vprod5x01234567lo = _mm_mullo_epi16(vxi5x01234567, vxk5x01234567);
      const __m128i vprod5x01234567hi = _mm_mulhi_epi16(vxi5x01234567, vxk5x01234567);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod5x01234567lo, vprod5x01234567hi));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod5x01234567lo, vprod5x01234567hi));

      const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
      const __m128i vxi6x01234567 = _mm_cvtepu8_epi16(vi6x01234567);
      const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 48 * sizeof(uint8_t)));
      const __m128i vxk6x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk6x01234567), vk_zero_point);
      i6 += 8;

      const __m128i vprod6x01234567lo = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);
      const __m128i vprod6x01234567hi = _mm_mulhi_epi16(vxi6x01234567, vxk6x01234567);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod6x01234567lo, vprod6x01234567hi));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod6x01234567lo, vprod6x01234567hi));

      const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
      const __m128i vxi7x01234567 = _mm_cvtepu8_epi16(vi7x01234567);
      const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 56 * sizeof(uint8_t)));
      const __m128i vxk7x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk7x01234567), vk_zero_point);
      i7 += 8;

      const __m128i vprod7x01234567lo = _mm_mullo_epi16(vxi7x01234567, vxk7x01234567);
      const __m128i vprod7x01234567hi = _mm_mulhi_epi16(vxi7x01234567, vxk7x01234567);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod7x01234567lo, vprod7x01234567hi));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod7x01234567lo, vprod7x01234567hi));

      const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
      const __m128i vxi8x01234567 = _mm_cvtepu8_epi16(vi8x01234567);
      const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 64 * sizeof(uint8_t)));
      const __m128i vxk8x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk8x01234567), vk_zero_point);
      i8 += 8;

      const __m128i vprod8x01234567lo = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);
      const __m128i vprod8x01234567hi = _mm_mulhi_epi16(vxi8x01234567, vxk8x01234567);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod8x01234567lo, vprod8x01234567hi));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod8x01234567lo, vprod8x01234567hi));

      // Advance past this group's biases (8 x int32) and kernel bytes (9 x 8).
      w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 72 * sizeof(uint8_t));

      // fp32 requantization: int32 acc -> float, multiply by scale, clamp the
      // upper end in float space (output_max - output_zero_point), then round
      // back to int32.
      __m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
      __m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);

      const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
      vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
      vscaled4567 = _mm_mul_ps(vscaled4567, vscale);

      const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
      vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
      vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);

      vacc0123 = _mm_cvtps_epi32(vscaled0123);
      vacc4567 = _mm_cvtps_epi32(vscaled4567);

      // Add the output zero point with int16 saturation, narrow to uint8, and
      // apply the lower clamp.
      const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
      __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);

      __m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);

      const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
      vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);

      _mm_storel_epi64((__m128i*) output, vout0123456701234567);
      output += 8;
    }
    // Remainder: 1-7 channels left. Same accumulation as above (may over-read,
    // permitted by XNN_OOB_READS); input pointers and `w` are not advanced
    // because this is the last group for the pixel.
    if XNN_UNLIKELY(c != 0) {
      {
        __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
        __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));

        const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
        const __m128i vxi0x01234567 = _mm_cvtepu8_epi16(vi0x01234567);
        const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(uint8_t)));
        const __m128i vxk0x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk0x01234567), vk_zero_point);

        const __m128i vprod0x01234567lo = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
        const __m128i vprod0x01234567hi = _mm_mulhi_epi16(vxi0x01234567, vxk0x01234567);

        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod0x01234567lo, vprod0x01234567hi));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod0x01234567lo, vprod0x01234567hi));

        const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
        const __m128i vxi1x01234567 = _mm_cvtepu8_epi16(vi1x01234567);
        const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(uint8_t)));
        const __m128i vxk1x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk1x01234567), vk_zero_point);

        const __m128i vprod1x01234567lo = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
        const __m128i vprod1x01234567hi = _mm_mulhi_epi16(vxi1x01234567, vxk1x01234567);

        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod1x01234567lo, vprod1x01234567hi));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod1x01234567lo, vprod1x01234567hi));

        const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
        const __m128i vxi2x01234567 = _mm_cvtepu8_epi16(vi2x01234567);
        const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(uint8_t)));
        const __m128i vxk2x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk2x01234567), vk_zero_point);

        const __m128i vprod2x01234567lo = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
        const __m128i vprod2x01234567hi = _mm_mulhi_epi16(vxi2x01234567, vxk2x01234567);

        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod2x01234567lo, vprod2x01234567hi));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod2x01234567lo, vprod2x01234567hi));

        const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
        const __m128i vxi3x01234567 = _mm_cvtepu8_epi16(vi3x01234567);
        const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(uint8_t)));
        const __m128i vxk3x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk3x01234567), vk_zero_point);

        const __m128i vprod3x01234567lo = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
        const __m128i vprod3x01234567hi = _mm_mulhi_epi16(vxi3x01234567, vxk3x01234567);

        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod3x01234567lo, vprod3x01234567hi));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod3x01234567lo, vprod3x01234567hi));

        const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
        const __m128i vxi4x01234567 = _mm_cvtepu8_epi16(vi4x01234567);
        const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(uint8_t)));
        const __m128i vxk4x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk4x01234567), vk_zero_point);

        const __m128i vprod4x01234567lo = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
        const __m128i vprod4x01234567hi = _mm_mulhi_epi16(vxi4x01234567, vxk4x01234567);

        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod4x01234567lo, vprod4x01234567hi));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod4x01234567lo, vprod4x01234567hi));

        const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
        const __m128i vxi5x01234567 = _mm_cvtepu8_epi16(vi5x01234567);
        const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(uint8_t)));
        const __m128i vxk5x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk5x01234567), vk_zero_point);

        const __m128i vprod5x01234567lo = _mm_mullo_epi16(vxi5x01234567, vxk5x01234567);
        const __m128i vprod5x01234567hi = _mm_mulhi_epi16(vxi5x01234567, vxk5x01234567);

        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod5x01234567lo, vprod5x01234567hi));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod5x01234567lo, vprod5x01234567hi));

        const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
        const __m128i vxi6x01234567 = _mm_cvtepu8_epi16(vi6x01234567);
        const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 48 * sizeof(uint8_t)));
        const __m128i vxk6x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk6x01234567), vk_zero_point);

        const __m128i vprod6x01234567lo = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);
        const __m128i vprod6x01234567hi = _mm_mulhi_epi16(vxi6x01234567, vxk6x01234567);

        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod6x01234567lo, vprod6x01234567hi));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod6x01234567lo, vprod6x01234567hi));

        const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
        const __m128i vxi7x01234567 = _mm_cvtepu8_epi16(vi7x01234567);
        const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 56 * sizeof(uint8_t)));
        const __m128i vxk7x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk7x01234567), vk_zero_point);

        const __m128i vprod7x01234567lo = _mm_mullo_epi16(vxi7x01234567, vxk7x01234567);
        const __m128i vprod7x01234567hi = _mm_mulhi_epi16(vxi7x01234567, vxk7x01234567);

        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod7x01234567lo, vprod7x01234567hi));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod7x01234567lo, vprod7x01234567hi));

        const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
        const __m128i vxi8x01234567 = _mm_cvtepu8_epi16(vi8x01234567);
        const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 64 * sizeof(uint8_t)));
        const __m128i vxk8x01234567 = _mm_sub_epi16(_mm_cvtepu8_epi16(vk8x01234567), vk_zero_point);

        const __m128i vprod8x01234567lo = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);
        const __m128i vprod8x01234567hi = _mm_mulhi_epi16(vxi8x01234567, vxk8x01234567);

        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vprod8x01234567lo, vprod8x01234567hi));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vprod8x01234567lo, vprod8x01234567hi));

        // Same fp32 requantization sequence as the main loop.
        __m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
        __m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);

        const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
        vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
        vscaled4567 = _mm_mul_ps(vscaled4567, vscale);

        const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
        vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
        vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);

        vacc0123 = _mm_cvtps_epi32(vscaled0123);
        vacc4567 = _mm_cvtps_epi32(vscaled4567);

        const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
        __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);

        __m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);

        vout0123456701234567 = _mm_max_epu8(vout0123456701234567, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));

        // Store the low `c` bytes in 4/2/1-byte pieces, shifting consumed
        // lanes out of the vector between stores.
        if (c & 4) {
          unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
          vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
          output += 4;
        }
        if (c & 2) {
          unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
          vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
          output += 2;
        }
        if (c & 1) {
          *output = (uint8_t) _mm_extract_epi8(vout0123456701234567, 0);
          output += 1;
        }
      }
    }

    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
| 19,741
| 49.620513
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-f32-vcvt/gen/qu8-f32-vcvt-avx-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/avx.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>
// Converts QU8 (quantized uint8) elements to f32 as y = (x + minus_zero_point) * scale,
// i.e. the zero point is subtracted (the packed constant holds its negation) and the
// result is scaled. AVX variant processing 16 elements per main-loop iteration.
//
// batch:  number of uint8 elements to convert (must be non-zero).
// input:  quantized input bytes (may be unaligned).
// output: converted float values.
// params: pre-packed conversion constants (params->avx.minus_zero_point, params->avx.scale).
void xnn_qu8_f32_vcvt_ukernel__avx_x16(
    size_t batch,
    const uint8_t* input,
    float* output,
    const union xnn_qu8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Load the broadcast conversion constants from the aligned params structure.
  const __m128i vminus_zero_point = _mm_load_si128((const __m128i*) params->avx.minus_zero_point);
  const __m256 vscale = _mm256_load_ps(params->avx.scale);
  // Main loop: 16 elements per iteration, read as four 4-byte loads, each byte
  // zero-extended to a 32-bit lane.
  for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
    __m128i vx0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input)));
    __m128i vx4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 4)));
    __m128i vx89AB = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 8)));
    __m128i vxCDEF = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 12)));
    input += 16;
    // Subtract the quantization zero point (constant already negated).
    vx0123 = _mm_add_epi32(vx0123, vminus_zero_point);
    vx4567 = _mm_add_epi32(vx4567, vminus_zero_point);
    vx89AB = _mm_add_epi32(vx89AB, vminus_zero_point);
    vxCDEF = _mm_add_epi32(vxCDEF, vminus_zero_point);
    // Merge 128-bit halves into 256-bit vectors, convert to float, and scale.
    const __m256i vx01234567 = _mm256_insertf128_si256(_mm256_castsi128_si256(vx0123), vx4567, 1);
    const __m256i vx89ABCDEF = _mm256_insertf128_si256(_mm256_castsi128_si256(vx89AB), vxCDEF, 1);
    __m256 vy01234567 = _mm256_cvtepi32_ps(vx01234567);
    __m256 vy89ABCDEF = _mm256_cvtepi32_ps(vx89ABCDEF);
    vy01234567 = _mm256_mul_ps(vy01234567, vscale);
    vy89ABCDEF = _mm256_mul_ps(vy89ABCDEF, vscale);
    _mm256_storeu_ps(output, vy01234567);
    _mm256_storeu_ps(output + 8, vy89ABCDEF);
    output += 16;
  }
  // Tail loop: 4 elements at a time using 128-bit operations.
  for (; batch >= 4 * sizeof(uint8_t); batch -= 4 * sizeof(uint8_t)) {
    __m128i vx = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input)));
    vx = _mm_add_epi32(vx, vminus_zero_point);
    input += 4;
    __m128 vy = _mm_cvtepi32_ps(vx);
    vy = _mm_mul_ps(vy, _mm256_castps256_ps128(vscale));
    _mm_storeu_ps(output, vy);
    output += 4;
  }
  // Remainder of 1-3 elements: the 4-byte load may read past the valid input
  // (permitted by XNN_OOB_READS); only the valid lanes are stored below.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(uint8_t));
    assert(batch <= 3 * sizeof(uint8_t));
    __m128i vx = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input)));
    vx = _mm_add_epi32(vx, vminus_zero_point);
    __m128 vy = _mm_cvtepi32_ps(vx);
    vy = _mm_mul_ps(vy, _mm256_castps256_ps128(vscale));
    if (batch & (2 * sizeof(uint8_t))) {
      _mm_storel_pi((__m64*) output, vy);
      vy = _mm_movehl_ps(vy, vy);
      output += 2;
    }
    if (batch & (1 * sizeof(uint8_t))) {
      _mm_store_ss(output, vy);
    }
  }
}
| 3,039
| 33.545455
| 98
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-f32-vcvt/gen/qu8-f32-vcvt-avx-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/avx.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>
// QU8 -> f32 conversion, y = (x + minus_zero_point) * scale (negated zero point is
// packed in params). AVX variant, 24 elements per main-loop iteration.
//
// batch:  number of uint8 elements (non-zero).
// input:  quantized bytes (may be unaligned); output: float results.
// params: pre-packed constants in params->avx.
void xnn_qu8_f32_vcvt_ukernel__avx_x24(
    size_t batch,
    const uint8_t* input,
    float* output,
    const union xnn_qu8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  const __m128i vminus_zero_point = _mm_load_si128((const __m128i*) params->avx.minus_zero_point);
  const __m256 vscale = _mm256_load_ps(params->avx.scale);
  // Main loop: six 4-byte loads, each byte zero-extended to a 32-bit lane.
  for (; batch >= 24 * sizeof(uint8_t); batch -= 24 * sizeof(uint8_t)) {
    __m128i vx0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input)));
    __m128i vx4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 4)));
    __m128i vx89AB = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 8)));
    __m128i vxCDEF = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 12)));
    __m128i vxGHIJ = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 16)));
    __m128i vxKLMN = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 20)));
    input += 24;
    // Subtract the zero point (constant already negated).
    vx0123 = _mm_add_epi32(vx0123, vminus_zero_point);
    vx4567 = _mm_add_epi32(vx4567, vminus_zero_point);
    vx89AB = _mm_add_epi32(vx89AB, vminus_zero_point);
    vxCDEF = _mm_add_epi32(vxCDEF, vminus_zero_point);
    vxGHIJ = _mm_add_epi32(vxGHIJ, vminus_zero_point);
    vxKLMN = _mm_add_epi32(vxKLMN, vminus_zero_point);
    // Merge 128-bit halves, convert to float, scale, and store.
    const __m256i vx01234567 = _mm256_insertf128_si256(_mm256_castsi128_si256(vx0123), vx4567, 1);
    const __m256i vx89ABCDEF = _mm256_insertf128_si256(_mm256_castsi128_si256(vx89AB), vxCDEF, 1);
    const __m256i vxGHIJKLMN = _mm256_insertf128_si256(_mm256_castsi128_si256(vxGHIJ), vxKLMN, 1);
    __m256 vy01234567 = _mm256_cvtepi32_ps(vx01234567);
    __m256 vy89ABCDEF = _mm256_cvtepi32_ps(vx89ABCDEF);
    __m256 vyGHIJKLMN = _mm256_cvtepi32_ps(vxGHIJKLMN);
    vy01234567 = _mm256_mul_ps(vy01234567, vscale);
    vy89ABCDEF = _mm256_mul_ps(vy89ABCDEF, vscale);
    vyGHIJKLMN = _mm256_mul_ps(vyGHIJKLMN, vscale);
    _mm256_storeu_ps(output, vy01234567);
    _mm256_storeu_ps(output + 8, vy89ABCDEF);
    _mm256_storeu_ps(output + 16, vyGHIJKLMN);
    output += 24;
  }
  // Tail loop: 4 elements at a time.
  for (; batch >= 4 * sizeof(uint8_t); batch -= 4 * sizeof(uint8_t)) {
    __m128i vx = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input)));
    vx = _mm_add_epi32(vx, vminus_zero_point);
    input += 4;
    __m128 vy = _mm_cvtepi32_ps(vx);
    vy = _mm_mul_ps(vy, _mm256_castps256_ps128(vscale));
    _mm_storeu_ps(output, vy);
    output += 4;
  }
  // Remainder of 1-3 elements; the 4-byte load may overread (XNN_OOB_READS),
  // but only valid lanes are stored.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(uint8_t));
    assert(batch <= 3 * sizeof(uint8_t));
    __m128i vx = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input)));
    vx = _mm_add_epi32(vx, vminus_zero_point);
    __m128 vy = _mm_cvtepi32_ps(vx);
    vy = _mm_mul_ps(vy, _mm256_castps256_ps128(vscale));
    if (batch & (2 * sizeof(uint8_t))) {
      _mm_storel_pi((__m64*) output, vy);
      vy = _mm_movehl_ps(vy, vy);
      output += 2;
    }
    if (batch & (1 * sizeof(uint8_t))) {
      _mm_store_ss(output, vy);
    }
  }
}
| 3,597
| 36.479167
| 98
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-f32-vcvt/gen/qu8-f32-vcvt-avx-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/avx.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>
// QU8 -> f32 conversion, y = (x + minus_zero_point) * scale (negated zero point is
// packed in params). AVX variant, 32 elements per main-loop iteration.
//
// batch:  number of uint8 elements (non-zero).
// input:  quantized bytes (may be unaligned); output: float results.
// params: pre-packed constants in params->avx.
void xnn_qu8_f32_vcvt_ukernel__avx_x32(
    size_t batch,
    const uint8_t* input,
    float* output,
    const union xnn_qu8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  const __m128i vminus_zero_point = _mm_load_si128((const __m128i*) params->avx.minus_zero_point);
  const __m256 vscale = _mm256_load_ps(params->avx.scale);
  // Main loop: eight 4-byte loads, each byte zero-extended to a 32-bit lane.
  for (; batch >= 32 * sizeof(uint8_t); batch -= 32 * sizeof(uint8_t)) {
    __m128i vx0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input)));
    __m128i vx4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 4)));
    __m128i vx89AB = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 8)));
    __m128i vxCDEF = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 12)));
    __m128i vxGHIJ = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 16)));
    __m128i vxKLMN = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 20)));
    __m128i vxOPQR = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 24)));
    __m128i vxSTUV = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 28)));
    input += 32;
    // Subtract the zero point (constant already negated).
    vx0123 = _mm_add_epi32(vx0123, vminus_zero_point);
    vx4567 = _mm_add_epi32(vx4567, vminus_zero_point);
    vx89AB = _mm_add_epi32(vx89AB, vminus_zero_point);
    vxCDEF = _mm_add_epi32(vxCDEF, vminus_zero_point);
    vxGHIJ = _mm_add_epi32(vxGHIJ, vminus_zero_point);
    vxKLMN = _mm_add_epi32(vxKLMN, vminus_zero_point);
    vxOPQR = _mm_add_epi32(vxOPQR, vminus_zero_point);
    vxSTUV = _mm_add_epi32(vxSTUV, vminus_zero_point);
    // Merge 128-bit halves, convert to float, scale, and store.
    const __m256i vx01234567 = _mm256_insertf128_si256(_mm256_castsi128_si256(vx0123), vx4567, 1);
    const __m256i vx89ABCDEF = _mm256_insertf128_si256(_mm256_castsi128_si256(vx89AB), vxCDEF, 1);
    const __m256i vxGHIJKLMN = _mm256_insertf128_si256(_mm256_castsi128_si256(vxGHIJ), vxKLMN, 1);
    const __m256i vxOPQRSTUV = _mm256_insertf128_si256(_mm256_castsi128_si256(vxOPQR), vxSTUV, 1);
    __m256 vy01234567 = _mm256_cvtepi32_ps(vx01234567);
    __m256 vy89ABCDEF = _mm256_cvtepi32_ps(vx89ABCDEF);
    __m256 vyGHIJKLMN = _mm256_cvtepi32_ps(vxGHIJKLMN);
    __m256 vyOPQRSTUV = _mm256_cvtepi32_ps(vxOPQRSTUV);
    vy01234567 = _mm256_mul_ps(vy01234567, vscale);
    vy89ABCDEF = _mm256_mul_ps(vy89ABCDEF, vscale);
    vyGHIJKLMN = _mm256_mul_ps(vyGHIJKLMN, vscale);
    vyOPQRSTUV = _mm256_mul_ps(vyOPQRSTUV, vscale);
    _mm256_storeu_ps(output, vy01234567);
    _mm256_storeu_ps(output + 8, vy89ABCDEF);
    _mm256_storeu_ps(output + 16, vyGHIJKLMN);
    _mm256_storeu_ps(output + 24, vyOPQRSTUV);
    output += 32;
  }
  // Tail loop: 4 elements at a time.
  for (; batch >= 4 * sizeof(uint8_t); batch -= 4 * sizeof(uint8_t)) {
    __m128i vx = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input)));
    vx = _mm_add_epi32(vx, vminus_zero_point);
    input += 4;
    __m128 vy = _mm_cvtepi32_ps(vx);
    vy = _mm_mul_ps(vy, _mm256_castps256_ps128(vscale));
    _mm_storeu_ps(output, vy);
    output += 4;
  }
  // Remainder of 1-3 elements; the 4-byte load may overread (XNN_OOB_READS),
  // but only valid lanes are stored.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(uint8_t));
    assert(batch <= 3 * sizeof(uint8_t));
    __m128i vx = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input)));
    vx = _mm_add_epi32(vx, vminus_zero_point);
    __m128 vy = _mm_cvtepi32_ps(vx);
    vy = _mm_mul_ps(vy, _mm256_castps256_ps128(vscale));
    if (batch & (2 * sizeof(uint8_t))) {
      _mm_storel_pi((__m64*) output, vy);
      vy = _mm_movehl_ps(vy, vy);
      output += 2;
    }
    if (batch & (1 * sizeof(uint8_t))) {
      _mm_store_ss(output, vy);
    }
  }
}
| 4,155
| 38.961538
| 98
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-f32-vcvt/gen/qu8-f32-vcvt-avx-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/avx.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>
// QU8 -> f32 conversion, y = (x + minus_zero_point) * scale (negated zero point is
// packed in params). AVX variant, 8 elements per main-loop iteration.
//
// batch:  number of uint8 elements (non-zero).
// input:  quantized bytes (may be unaligned); output: float results.
// params: pre-packed constants in params->avx.
void xnn_qu8_f32_vcvt_ukernel__avx_x8(
    size_t batch,
    const uint8_t* input,
    float* output,
    const union xnn_qu8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  const __m128i vminus_zero_point = _mm_load_si128((const __m128i*) params->avx.minus_zero_point);
  const __m256 vscale = _mm256_load_ps(params->avx.scale);
  // Main loop: two 4-byte loads, each byte zero-extended to a 32-bit lane.
  for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
    __m128i vx0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input)));
    __m128i vx4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 4)));
    input += 8;
    // Subtract the zero point (constant already negated).
    vx0123 = _mm_add_epi32(vx0123, vminus_zero_point);
    vx4567 = _mm_add_epi32(vx4567, vminus_zero_point);
    // Merge halves, convert to float, scale, and store.
    const __m256i vx01234567 = _mm256_insertf128_si256(_mm256_castsi128_si256(vx0123), vx4567, 1);
    __m256 vy01234567 = _mm256_cvtepi32_ps(vx01234567);
    vy01234567 = _mm256_mul_ps(vy01234567, vscale);
    _mm256_storeu_ps(output, vy01234567);
    output += 8;
  }
  // Tail loop: 4 elements at a time.
  for (; batch >= 4 * sizeof(uint8_t); batch -= 4 * sizeof(uint8_t)) {
    __m128i vx = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input)));
    vx = _mm_add_epi32(vx, vminus_zero_point);
    input += 4;
    __m128 vy = _mm_cvtepi32_ps(vx);
    vy = _mm_mul_ps(vy, _mm256_castps256_ps128(vscale));
    _mm_storeu_ps(output, vy);
    output += 4;
  }
  // Remainder of 1-3 elements; the 4-byte load may overread (XNN_OOB_READS),
  // but only valid lanes are stored.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(uint8_t));
    assert(batch <= 3 * sizeof(uint8_t));
    __m128i vx = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input)));
    vx = _mm_add_epi32(vx, vminus_zero_point);
    __m128 vy = _mm_cvtepi32_ps(vx);
    vy = _mm_mul_ps(vy, _mm256_castps256_ps128(vscale));
    if (batch & (2 * sizeof(uint8_t))) {
      _mm_storel_pi((__m64*) output, vy);
      vy = _mm_movehl_ps(vy, vy);
      output += 2;
    }
    if (batch & (1 * sizeof(uint8_t))) {
      _mm_store_ss(output, vy);
    }
  }
}
| 2,478
| 29.9875
| 98
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-f32-vcvt/gen/qu8-f32-vcvt-avx2-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/avx2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
// QU8 -> f32 conversion, y = (x + minus_zero_point) * scale (negated zero point is
// packed in params). AVX2 variant, 16 elements per main-loop iteration.
//
// batch:  number of uint8 elements (non-zero).
// input:  quantized bytes (may be unaligned); output: float results.
// params: pre-packed constants in params->avx.
void xnn_qu8_f32_vcvt_ukernel__avx2_x16(
    size_t batch,
    const uint8_t* input,
    float* output,
    const union xnn_qu8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  const __m256i vminus_zero_point = _mm256_load_si256((const __m256i*) params->avx.minus_zero_point);
  const __m256 vscale = _mm256_load_ps(params->avx.scale);
  // Main loop: two 8-byte loads, each byte zero-extended to a 32-bit lane (AVX2).
  for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
    __m256i vx01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) input));
    __m256i vx89ABCDEF = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) (input + 8)));
    input += 16;
    // Subtract the zero point (constant already negated).
    vx01234567 = _mm256_add_epi32(vx01234567, vminus_zero_point);
    vx89ABCDEF = _mm256_add_epi32(vx89ABCDEF, vminus_zero_point);
    __m256 vy01234567 = _mm256_cvtepi32_ps(vx01234567);
    __m256 vy89ABCDEF = _mm256_cvtepi32_ps(vx89ABCDEF);
    vy01234567 = _mm256_mul_ps(vy01234567, vscale);
    vy89ABCDEF = _mm256_mul_ps(vy89ABCDEF, vscale);
    _mm256_storeu_ps(output, vy01234567);
    _mm256_storeu_ps(output + 8, vy89ABCDEF);
    output += 16;
  }
  // Tail loop: 8 elements at a time.
  for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
    __m256i vx = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) input));
    vx = _mm256_add_epi32(vx, vminus_zero_point);
    input += 8;
    __m256 vy = _mm256_cvtepi32_ps(vx);
    vy = _mm256_mul_ps(vy, vscale);
    _mm256_storeu_ps(output, vy);
    output += 8;
  }
  // Remainder of 1-7 elements; the 8-byte load may overread (XNN_OOB_READS).
  // Valid lanes are stored in 4/2/1 element steps.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(uint8_t));
    assert(batch <= 7 * sizeof(uint8_t));
    __m256i vx = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) input));
    vx = _mm256_add_epi32(vx, vminus_zero_point);
    __m256 vy = _mm256_cvtepi32_ps(vx);
    vy = _mm256_mul_ps(vy, vscale);
    __m128 vy_lo = _mm256_castps256_ps128(vy);
    if (batch & (4 * sizeof(uint8_t))) {
      _mm_storeu_ps(output, vy_lo);
      vy_lo = _mm256_extractf128_ps(vy, 1);
      output += 4;
    }
    if (batch & (2 * sizeof(uint8_t))) {
      _mm_storel_pi((__m64*) output, vy_lo);
      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
      output += 2;
    }
    if (batch & (1 * sizeof(uint8_t))) {
      _mm_store_ss(output, vy_lo);
    }
  }
}
| 2,733
| 30.425287
| 101
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-f32-vcvt/gen/qu8-f32-vcvt-avx2-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/avx2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
// QU8 -> f32 conversion, y = (x + minus_zero_point) * scale (negated zero point is
// packed in params). AVX2 variant, 24 elements per main-loop iteration.
//
// batch:  number of uint8 elements (non-zero).
// input:  quantized bytes (may be unaligned); output: float results.
// params: pre-packed constants in params->avx.
void xnn_qu8_f32_vcvt_ukernel__avx2_x24(
    size_t batch,
    const uint8_t* input,
    float* output,
    const union xnn_qu8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  const __m256i vminus_zero_point = _mm256_load_si256((const __m256i*) params->avx.minus_zero_point);
  const __m256 vscale = _mm256_load_ps(params->avx.scale);
  // Main loop: three 8-byte loads, each byte zero-extended to a 32-bit lane (AVX2).
  for (; batch >= 24 * sizeof(uint8_t); batch -= 24 * sizeof(uint8_t)) {
    __m256i vx01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) input));
    __m256i vx89ABCDEF = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) (input + 8)));
    __m256i vxGHIJKLMN = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) (input + 16)));
    input += 24;
    // Subtract the zero point (constant already negated).
    vx01234567 = _mm256_add_epi32(vx01234567, vminus_zero_point);
    vx89ABCDEF = _mm256_add_epi32(vx89ABCDEF, vminus_zero_point);
    vxGHIJKLMN = _mm256_add_epi32(vxGHIJKLMN, vminus_zero_point);
    __m256 vy01234567 = _mm256_cvtepi32_ps(vx01234567);
    __m256 vy89ABCDEF = _mm256_cvtepi32_ps(vx89ABCDEF);
    __m256 vyGHIJKLMN = _mm256_cvtepi32_ps(vxGHIJKLMN);
    vy01234567 = _mm256_mul_ps(vy01234567, vscale);
    vy89ABCDEF = _mm256_mul_ps(vy89ABCDEF, vscale);
    vyGHIJKLMN = _mm256_mul_ps(vyGHIJKLMN, vscale);
    _mm256_storeu_ps(output, vy01234567);
    _mm256_storeu_ps(output + 8, vy89ABCDEF);
    _mm256_storeu_ps(output + 16, vyGHIJKLMN);
    output += 24;
  }
  // Tail loop: 8 elements at a time.
  for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
    __m256i vx = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) input));
    vx = _mm256_add_epi32(vx, vminus_zero_point);
    input += 8;
    __m256 vy = _mm256_cvtepi32_ps(vx);
    vy = _mm256_mul_ps(vy, vscale);
    _mm256_storeu_ps(output, vy);
    output += 8;
  }
  // Remainder of 1-7 elements; the 8-byte load may overread (XNN_OOB_READS).
  // Valid lanes are stored in 4/2/1 element steps.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(uint8_t));
    assert(batch <= 7 * sizeof(uint8_t));
    __m256i vx = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) input));
    vx = _mm256_add_epi32(vx, vminus_zero_point);
    __m256 vy = _mm256_cvtepi32_ps(vx);
    vy = _mm256_mul_ps(vy, vscale);
    __m128 vy_lo = _mm256_castps256_ps128(vy);
    if (batch & (4 * sizeof(uint8_t))) {
      _mm_storeu_ps(output, vy_lo);
      vy_lo = _mm256_extractf128_ps(vy, 1);
      output += 4;
    }
    if (batch & (2 * sizeof(uint8_t))) {
      _mm_storel_pi((__m64*) output, vy_lo);
      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
      output += 2;
    }
    if (batch & (1 * sizeof(uint8_t))) {
      _mm_store_ss(output, vy_lo);
    }
  }
}
| 3,049
| 32.152174
| 101
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-f32-vcvt/gen/qu8-f32-vcvt-avx2-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/avx2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
// QU8 -> f32 conversion, y = (x + minus_zero_point) * scale (negated zero point is
// packed in params). AVX2 variant, 32 elements per main-loop iteration.
//
// batch:  number of uint8 elements (non-zero).
// input:  quantized bytes (may be unaligned); output: float results.
// params: pre-packed constants in params->avx.
void xnn_qu8_f32_vcvt_ukernel__avx2_x32(
    size_t batch,
    const uint8_t* input,
    float* output,
    const union xnn_qu8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  const __m256i vminus_zero_point = _mm256_load_si256((const __m256i*) params->avx.minus_zero_point);
  const __m256 vscale = _mm256_load_ps(params->avx.scale);
  // Main loop: four 8-byte loads, each byte zero-extended to a 32-bit lane (AVX2).
  for (; batch >= 32 * sizeof(uint8_t); batch -= 32 * sizeof(uint8_t)) {
    __m256i vx01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) input));
    __m256i vx89ABCDEF = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) (input + 8)));
    __m256i vxGHIJKLMN = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) (input + 16)));
    __m256i vxOPQRSTUV = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) (input + 24)));
    input += 32;
    // Subtract the zero point (constant already negated).
    vx01234567 = _mm256_add_epi32(vx01234567, vminus_zero_point);
    vx89ABCDEF = _mm256_add_epi32(vx89ABCDEF, vminus_zero_point);
    vxGHIJKLMN = _mm256_add_epi32(vxGHIJKLMN, vminus_zero_point);
    vxOPQRSTUV = _mm256_add_epi32(vxOPQRSTUV, vminus_zero_point);
    __m256 vy01234567 = _mm256_cvtepi32_ps(vx01234567);
    __m256 vy89ABCDEF = _mm256_cvtepi32_ps(vx89ABCDEF);
    __m256 vyGHIJKLMN = _mm256_cvtepi32_ps(vxGHIJKLMN);
    __m256 vyOPQRSTUV = _mm256_cvtepi32_ps(vxOPQRSTUV);
    vy01234567 = _mm256_mul_ps(vy01234567, vscale);
    vy89ABCDEF = _mm256_mul_ps(vy89ABCDEF, vscale);
    vyGHIJKLMN = _mm256_mul_ps(vyGHIJKLMN, vscale);
    vyOPQRSTUV = _mm256_mul_ps(vyOPQRSTUV, vscale);
    _mm256_storeu_ps(output, vy01234567);
    _mm256_storeu_ps(output + 8, vy89ABCDEF);
    _mm256_storeu_ps(output + 16, vyGHIJKLMN);
    _mm256_storeu_ps(output + 24, vyOPQRSTUV);
    output += 32;
  }
  // Tail loop: 8 elements at a time.
  for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
    __m256i vx = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) input));
    vx = _mm256_add_epi32(vx, vminus_zero_point);
    input += 8;
    __m256 vy = _mm256_cvtepi32_ps(vx);
    vy = _mm256_mul_ps(vy, vscale);
    _mm256_storeu_ps(output, vy);
    output += 8;
  }
  // Remainder of 1-7 elements; the 8-byte load may overread (XNN_OOB_READS).
  // Valid lanes are stored in 4/2/1 element steps.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(uint8_t));
    assert(batch <= 7 * sizeof(uint8_t));
    __m256i vx = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) input));
    vx = _mm256_add_epi32(vx, vminus_zero_point);
    __m256 vy = _mm256_cvtepi32_ps(vx);
    vy = _mm256_mul_ps(vy, vscale);
    __m128 vy_lo = _mm256_castps256_ps128(vy);
    if (batch & (4 * sizeof(uint8_t))) {
      _mm_storeu_ps(output, vy_lo);
      vy_lo = _mm256_extractf128_ps(vy, 1);
      output += 4;
    }
    if (batch & (2 * sizeof(uint8_t))) {
      _mm_storel_pi((__m64*) output, vy_lo);
      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
      output += 2;
    }
    if (batch & (1 * sizeof(uint8_t))) {
      _mm_store_ss(output, vy_lo);
    }
  }
}
| 3,365
| 33.701031
| 101
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-f32-vcvt/gen/qu8-f32-vcvt-avx2-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/avx2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
// QU8 -> f32 conversion, y = (x + minus_zero_point) * scale (negated zero point is
// packed in params). AVX2 variant, 8 elements per main-loop iteration.
//
// Fix: the generated code contained a second, byte-identical "batch >= 8" loop
// immediately after the main loop. It was unreachable (batch < 8 holds once the
// first loop exits), so the dead duplicate has been removed; behavior is unchanged.
//
// batch:  number of uint8 elements (non-zero).
// input:  quantized bytes (may be unaligned); output: float results.
// params: pre-packed constants in params->avx.
void xnn_qu8_f32_vcvt_ukernel__avx2_x8(
    size_t batch,
    const uint8_t* input,
    float* output,
    const union xnn_qu8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  const __m256i vminus_zero_point = _mm256_load_si256((const __m256i*) params->avx.minus_zero_point);
  const __m256 vscale = _mm256_load_ps(params->avx.scale);
  // Main loop: one 8-byte load, each byte zero-extended to a 32-bit lane (AVX2).
  for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
    __m256i vx01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) input));
    input += 8;
    // Subtract the zero point (constant already negated), convert, scale, store.
    vx01234567 = _mm256_add_epi32(vx01234567, vminus_zero_point);
    __m256 vy01234567 = _mm256_cvtepi32_ps(vx01234567);
    vy01234567 = _mm256_mul_ps(vy01234567, vscale);
    _mm256_storeu_ps(output, vy01234567);
    output += 8;
  }
  // Remainder of 1-7 elements; the 8-byte load may overread (XNN_OOB_READS).
  // Valid lanes are stored in 4/2/1 element steps.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(uint8_t));
    assert(batch <= 7 * sizeof(uint8_t));
    __m256i vx = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) input));
    vx = _mm256_add_epi32(vx, vminus_zero_point);
    __m256 vy = _mm256_cvtepi32_ps(vx);
    vy = _mm256_mul_ps(vy, vscale);
    __m128 vy_lo = _mm256_castps256_ps128(vy);
    if (batch & (4 * sizeof(uint8_t))) {
      _mm_storeu_ps(output, vy_lo);
      vy_lo = _mm256_extractf128_ps(vy, 1);
      output += 4;
    }
    if (batch & (2 * sizeof(uint8_t))) {
      _mm_storel_pi((__m64*) output, vy_lo);
      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
      output += 2;
    }
    if (batch & (1 * sizeof(uint8_t))) {
      _mm_store_ss(output, vy_lo);
    }
  }
}
| 2,414
| 28.45122
| 101
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-f32-vcvt/gen/qu8-f32-vcvt-avx512skx-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/avx512skx.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
// QU8 -> f32 conversion, y = (x + minus_zero_point) * scale (negated zero point is
// packed in params). AVX-512 SKX variant, 16 elements per main-loop iteration; the
// remainder is handled with a masked load/store, so no out-of-bounds access occurs.
//
// batch:  number of uint8 elements (non-zero).
// input:  quantized bytes (may be unaligned); output: float results.
// params: pre-packed constants in params->avx512.
void xnn_qu8_f32_vcvt_ukernel__avx512skx_x16(
    size_t batch,
    const uint8_t* input,
    float* output,
    const union xnn_qu8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  const __m512i vminus_zero_point = _mm512_load_si512(params->avx512.minus_zero_point);
  const __m512 vscale = _mm512_load_ps(params->avx512.scale);
  // Main loop: 16 bytes loaded and zero-extended to 32-bit lanes in one step.
  for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
    __m512i vx = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) input));
    vx = _mm512_add_epi32(vx, vminus_zero_point);
    input += 16;
    __m512 vy = _mm512_cvtepi32_ps(vx);
    vy = _mm512_mul_ps(vy, vscale);
    _mm512_storeu_ps(output, vy);
    output += 16;
  }
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(uint8_t));
    assert(batch <= 15 * sizeof(uint8_t));
    // Prepare mask for valid elements (depends on batch).
    // (1 << batch) - 1 sets one mask bit per remaining element.
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
    __m512i vx = _mm512_cvtepu8_epi32(_mm_maskz_loadu_epi8(vmask, input));
    vx = _mm512_add_epi32(vx, vminus_zero_point);
    __m512 vy = _mm512_cvtepi32_ps(vx);
    vy = _mm512_mul_ps(vy, vscale);
    _mm512_mask_storeu_ps(output, vmask, vy);
  }
}
| 1,781
| 29.20339
| 105
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-f32-vcvt/gen/qu8-f32-vcvt-avx512skx-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/avx512skx.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
// QU8 -> f32 conversion, y = (x + minus_zero_point) * scale (negated zero point is
// packed in params). AVX-512 SKX variant, 32 elements per main-loop iteration; the
// remainder is handled with a masked load/store.
//
// batch:  number of uint8 elements (non-zero).
// input:  quantized bytes (may be unaligned); output: float results.
// params: pre-packed constants in params->avx512.
void xnn_qu8_f32_vcvt_ukernel__avx512skx_x32(
    size_t batch,
    const uint8_t* input,
    float* output,
    const union xnn_qu8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  const __m512i vminus_zero_point = _mm512_load_si512(params->avx512.minus_zero_point);
  const __m512 vscale = _mm512_load_ps(params->avx512.scale);
  // Main loop: two 16-byte loads zero-extended to 32-bit lanes.
  for (; batch >= 32 * sizeof(uint8_t); batch -= 32 * sizeof(uint8_t)) {
    __m512i vx0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) input));
    __m512i vxGHIJKLMNOPQRSTUV = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (input + 16)));
    input += 32;
    // Subtract the zero point (constant already negated), convert, scale, store.
    vx0123456789ABCDEF = _mm512_add_epi32(vx0123456789ABCDEF, vminus_zero_point);
    vxGHIJKLMNOPQRSTUV = _mm512_add_epi32(vxGHIJKLMNOPQRSTUV, vminus_zero_point);
    __m512 vy0123456789ABCDEF = _mm512_cvtepi32_ps(vx0123456789ABCDEF);
    __m512 vyGHIJKLMNOPQRSTUV = _mm512_cvtepi32_ps(vxGHIJKLMNOPQRSTUV);
    vy0123456789ABCDEF = _mm512_mul_ps(vy0123456789ABCDEF, vscale);
    vyGHIJKLMNOPQRSTUV = _mm512_mul_ps(vyGHIJKLMNOPQRSTUV, vscale);
    _mm512_storeu_ps(output, vy0123456789ABCDEF);
    _mm512_storeu_ps(output + 16, vyGHIJKLMNOPQRSTUV);
    output += 32;
  }
  // Tail loop: 16 elements at a time.
  for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
    __m512i vx = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) input));
    vx = _mm512_add_epi32(vx, vminus_zero_point);
    input += 16;
    __m512 vy = _mm512_cvtepi32_ps(vx);
    vy = _mm512_mul_ps(vy, vscale);
    _mm512_storeu_ps(output, vy);
    output += 16;
  }
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(uint8_t));
    assert(batch <= 15 * sizeof(uint8_t));
    // Prepare mask for valid elements (depends on batch).
    // (1 << batch) - 1 sets one mask bit per remaining element.
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
    __m512i vx = _mm512_cvtepu8_epi32(_mm_maskz_loadu_epi8(vmask, input));
    vx = _mm512_add_epi32(vx, vminus_zero_point);
    __m512 vy = _mm512_cvtepi32_ps(vx);
    vy = _mm512_mul_ps(vy, vscale);
    _mm512_mask_storeu_ps(output, vmask, vy);
  }
}
| 2,645
| 33.363636
| 105
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-f32-vcvt/gen/qu8-f32-vcvt-avx512skx-x48.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/avx512skx.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
// QU8 -> f32 conversion, y = (x + minus_zero_point) * scale (negated zero point is
// packed in params). AVX-512 SKX variant, 48 elements per main-loop iteration; the
// remainder is handled with a masked load/store.
//
// batch:  number of uint8 elements (non-zero).
// input:  quantized bytes (may be unaligned); output: float results.
// params: pre-packed constants in params->avx512.
void xnn_qu8_f32_vcvt_ukernel__avx512skx_x48(
    size_t batch,
    const uint8_t* input,
    float* output,
    const union xnn_qu8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  const __m512i vminus_zero_point = _mm512_load_si512(params->avx512.minus_zero_point);
  const __m512 vscale = _mm512_load_ps(params->avx512.scale);
  // Main loop: three 16-byte loads zero-extended to 32-bit lanes.
  for (; batch >= 48 * sizeof(uint8_t); batch -= 48 * sizeof(uint8_t)) {
    __m512i vx0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) input));
    __m512i vxGHIJKLMNOPQRSTUV = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (input + 16)));
    __m512i vxWXYZ = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (input + 32)));
    input += 48;
    // Subtract the zero point (constant already negated), convert, scale, store.
    vx0123456789ABCDEF = _mm512_add_epi32(vx0123456789ABCDEF, vminus_zero_point);
    vxGHIJKLMNOPQRSTUV = _mm512_add_epi32(vxGHIJKLMNOPQRSTUV, vminus_zero_point);
    vxWXYZ = _mm512_add_epi32(vxWXYZ, vminus_zero_point);
    __m512 vy0123456789ABCDEF = _mm512_cvtepi32_ps(vx0123456789ABCDEF);
    __m512 vyGHIJKLMNOPQRSTUV = _mm512_cvtepi32_ps(vxGHIJKLMNOPQRSTUV);
    __m512 vyWXYZ = _mm512_cvtepi32_ps(vxWXYZ);
    vy0123456789ABCDEF = _mm512_mul_ps(vy0123456789ABCDEF, vscale);
    vyGHIJKLMNOPQRSTUV = _mm512_mul_ps(vyGHIJKLMNOPQRSTUV, vscale);
    vyWXYZ = _mm512_mul_ps(vyWXYZ, vscale);
    _mm512_storeu_ps(output, vy0123456789ABCDEF);
    _mm512_storeu_ps(output + 16, vyGHIJKLMNOPQRSTUV);
    _mm512_storeu_ps(output + 32, vyWXYZ);
    output += 48;
  }
  // Tail loop: 16 elements at a time.
  for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
    __m512i vx = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) input));
    vx = _mm512_add_epi32(vx, vminus_zero_point);
    input += 16;
    __m512 vy = _mm512_cvtepi32_ps(vx);
    vy = _mm512_mul_ps(vy, vscale);
    _mm512_storeu_ps(output, vy);
    output += 16;
  }
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(uint8_t));
    assert(batch <= 15 * sizeof(uint8_t));
    // Prepare mask for valid elements (depends on batch).
    // (1 << batch) - 1 sets one mask bit per remaining element.
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
    __m512i vx = _mm512_cvtepu8_epi32(_mm_maskz_loadu_epi8(vmask, input));
    vx = _mm512_add_epi32(vx, vminus_zero_point);
    __m512 vy = _mm512_cvtepi32_ps(vx);
    vy = _mm512_mul_ps(vy, vscale);
    _mm512_mask_storeu_ps(output, vmask, vy);
  }
}
| 2,929
| 34.731707
| 105
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-f32-vcvt/gen/qu8-f32-vcvt-avx512skx-x64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/avx512skx.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
// Converts QU8 (asymmetrically quantized uint8) elements to FP32:
//   y = scale * ((int32_t) x - zero_point)
// AVX512SKX micro-kernel processing 64 elements per main-loop iteration.
void xnn_qu8_f32_vcvt_ukernel__avx512skx_x64(
    size_t batch,
    const uint8_t* input,
    float* output,
    const union xnn_qu8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // minus_zero_point holds the pre-negated zero point, so the vector add below
  // performs the subtraction (x - zero_point).
  const __m512i vminus_zero_point = _mm512_load_si512(params->avx512.minus_zero_point);
  const __m512 vscale = _mm512_load_ps(params->avx512.scale);
  // Main loop: 64 uint8 elements -> four 16-lane int32 vectors -> four float vectors.
  for (; batch >= 64 * sizeof(uint8_t); batch -= 64 * sizeof(uint8_t)) {
    __m512i vx0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) input));
    __m512i vxGHIJKLMNOPQRSTUV = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (input + 16)));
    __m512i vxWXYZ = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (input + 32)));
    __m512i vx = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (input + 48)));
    input += 64;
    vx0123456789ABCDEF = _mm512_add_epi32(vx0123456789ABCDEF, vminus_zero_point);
    vxGHIJKLMNOPQRSTUV = _mm512_add_epi32(vxGHIJKLMNOPQRSTUV, vminus_zero_point);
    vxWXYZ = _mm512_add_epi32(vxWXYZ, vminus_zero_point);
    vx = _mm512_add_epi32(vx, vminus_zero_point);
    __m512 vy0123456789ABCDEF = _mm512_cvtepi32_ps(vx0123456789ABCDEF);
    __m512 vyGHIJKLMNOPQRSTUV = _mm512_cvtepi32_ps(vxGHIJKLMNOPQRSTUV);
    __m512 vyWXYZ = _mm512_cvtepi32_ps(vxWXYZ);
    __m512 vy = _mm512_cvtepi32_ps(vx);
    vy0123456789ABCDEF = _mm512_mul_ps(vy0123456789ABCDEF, vscale);
    vyGHIJKLMNOPQRSTUV = _mm512_mul_ps(vyGHIJKLMNOPQRSTUV, vscale);
    vyWXYZ = _mm512_mul_ps(vyWXYZ, vscale);
    vy = _mm512_mul_ps(vy, vscale);
    _mm512_storeu_ps(output, vy0123456789ABCDEF);
    _mm512_storeu_ps(output + 16, vyGHIJKLMNOPQRSTUV);
    _mm512_storeu_ps(output + 32, vyWXYZ);
    _mm512_storeu_ps(output + 48, vy);
    output += 64;
  }
  // Secondary loop: 16 elements at a time.
  for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
    __m512i vx = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) input));
    vx = _mm512_add_epi32(vx, vminus_zero_point);
    input += 16;
    __m512 vy = _mm512_cvtepi32_ps(vx);
    vy = _mm512_mul_ps(vy, vscale);
    _mm512_storeu_ps(output, vy);
    output += 16;
  }
  // Tail: 1-15 elements handled with masked load/store, so no out-of-bounds access.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(uint8_t));
    assert(batch <= 15 * sizeof(uint8_t));
    // Prepare mask for valid elements (depends on batch).
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
    __m512i vx = _mm512_cvtepu8_epi32(_mm_maskz_loadu_epi8(vmask, input));
    vx = _mm512_add_epi32(vx, vminus_zero_point);
    __m512 vy = _mm512_cvtepi32_ps(vx);
    vy = _mm512_mul_ps(vy, vscale);
    // Only the lanes selected by vmask are written to output.
    _mm512_mask_storeu_ps(output, vmask, vy);
  }
}
| 3,181
| 35.574713
| 105
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-f32-vcvt/gen/qu8-f32-vcvt-neon-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
// Converts QU8 (asymmetrically quantized uint8) elements to FP32:
//   y = scale * ((int32_t) x - zero_point)
// NEON micro-kernel processing 16 elements per main-loop iteration.
void xnn_qu8_f32_vcvt_ukernel__neon_x16(
    size_t batch,
    const uint8_t* input,
    float* output,
    const union xnn_qu8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // minus_zero_point is the pre-negated zero point replicated across int16 lanes
  // (loaded as a duplicated 32-bit word). vaddw_u8 below widens each uint8 and
  // adds it, so reinterpreting the sum as signed yields (x - zero_point).
  const int16x8_t vminus_zero_point = vreinterpretq_s16_u32(vld1q_dup_u32((const void*) params->neon.minus_zero_point));
  const float32x4_t vscale = vld1q_dup_f32(&params->neon.scale);
  for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
    const uint8x8_t vx01234567 = vld1_u8(input); input += 8;
    const uint8x8_t vx89ABCDEF = vld1_u8(input); input += 8;
    // Widen to int16 while subtracting the zero point.
    const int16x8_t vhx01234567 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(vminus_zero_point), vx01234567));
    const int16x8_t vhx89ABCDEF = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(vminus_zero_point), vx89ABCDEF));
    // Widen int16 -> int32.
    const int32x4_t vwx0123 = vmovl_s16(vget_low_s16(vhx01234567));
    const int32x4_t vwx4567 = vmovl_s16(vget_high_s16(vhx01234567));
    const int32x4_t vwx89AB = vmovl_s16(vget_low_s16(vhx89ABCDEF));
    const int32x4_t vwxCDEF = vmovl_s16(vget_high_s16(vhx89ABCDEF));
    // Convert to float and apply the dequantization scale.
    float32x4_t vy0123 = vcvtq_f32_s32(vwx0123);
    float32x4_t vy4567 = vcvtq_f32_s32(vwx4567);
    float32x4_t vy89AB = vcvtq_f32_s32(vwx89AB);
    float32x4_t vyCDEF = vcvtq_f32_s32(vwxCDEF);
    vy0123 = vmulq_f32(vy0123, vscale);
    vy4567 = vmulq_f32(vy4567, vscale);
    vy89AB = vmulq_f32(vy89AB, vscale);
    vyCDEF = vmulq_f32(vyCDEF, vscale);
    vst1q_f32(output, vy0123); output += 4;
    vst1q_f32(output, vy4567); output += 4;
    vst1q_f32(output, vy89AB); output += 4;
    vst1q_f32(output, vyCDEF); output += 4;
  }
  // Secondary loop: 8 elements at a time.
  for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
    const uint8x8_t vx = vld1_u8(input); input += 8;
    const int16x8_t vhx = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(vminus_zero_point), vx));
    const int32x4_t vwx_lo = vmovl_s16(vget_low_s16(vhx));
    const int32x4_t vwx_hi = vmovl_s16(vget_high_s16(vhx));
    float32x4_t vy_lo = vcvtq_f32_s32(vwx_lo);
    float32x4_t vy_hi = vcvtq_f32_s32(vwx_hi);
    vy_lo = vmulq_f32(vy_lo, vscale);
    vy_hi = vmulq_f32(vy_hi, vscale);
    vst1q_f32(output, vy_lo); output += 4;
    vst1q_f32(output, vy_hi); output += 4;
  }
  // Tail: 1-7 elements. The full 8-byte load may read past the end of input;
  // this is permitted by the XNN_OOB_READS annotation, and only valid lanes are stored.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(uint8_t));
    assert(batch <= 7 * sizeof(uint8_t));
    const uint8x8_t vx = vld1_u8(input);
    const int16x8_t vhx = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(vminus_zero_point), vx));
    const int32x4_t vwx_lo = vmovl_s16(vget_low_s16(vhx));
    const int32x4_t vwx_hi = vmovl_s16(vget_high_s16(vhx));
    float32x4_t vy = vcvtq_f32_s32(vwx_lo);
    vy = vmulq_f32(vy, vscale);
    if (batch & (4 * sizeof(uint8_t))) {
      vst1q_f32(output, vy); output += 4;
      vy = vcvtq_f32_s32(vwx_hi);
      vy = vmulq_f32(vy, vscale);
    }
    float32x2_t vy_lo = vget_low_f32(vy);
    if (batch & (2 * sizeof(uint8_t))) {
      vst1_f32(output, vy_lo); output += 2;
      vy_lo = vget_high_f32(vy);
    }
    if (batch & (1 * sizeof(uint8_t))) {
      vst1_lane_f32(output, vy_lo, 0);
    }
  }
}
| 3,655
| 33.819048
| 120
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-f32-vcvt/gen/qu8-f32-vcvt-neon-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
// Converts QU8 (asymmetrically quantized uint8) elements to FP32:
//   y = scale * ((int32_t) x - zero_point)
// NEON micro-kernel processing 24 elements per main-loop iteration.
void xnn_qu8_f32_vcvt_ukernel__neon_x24(
    size_t batch,
    const uint8_t* input,
    float* output,
    const union xnn_qu8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // minus_zero_point is the pre-negated zero point replicated across int16 lanes;
  // vaddw_u8 widens each uint8 and adds it, giving (x - zero_point) when
  // reinterpreted as signed.
  const int16x8_t vminus_zero_point = vreinterpretq_s16_u32(vld1q_dup_u32((const void*) params->neon.minus_zero_point));
  const float32x4_t vscale = vld1q_dup_f32(&params->neon.scale);
  for (; batch >= 24 * sizeof(uint8_t); batch -= 24 * sizeof(uint8_t)) {
    const uint8x8_t vx01234567 = vld1_u8(input); input += 8;
    const uint8x8_t vx89ABCDEF = vld1_u8(input); input += 8;
    const uint8x8_t vxGHIJKLMN = vld1_u8(input); input += 8;
    // Widen to int16 while subtracting the zero point.
    const int16x8_t vhx01234567 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(vminus_zero_point), vx01234567));
    const int16x8_t vhx89ABCDEF = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(vminus_zero_point), vx89ABCDEF));
    const int16x8_t vhxGHIJKLMN = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(vminus_zero_point), vxGHIJKLMN));
    // Widen int16 -> int32.
    const int32x4_t vwx0123 = vmovl_s16(vget_low_s16(vhx01234567));
    const int32x4_t vwx4567 = vmovl_s16(vget_high_s16(vhx01234567));
    const int32x4_t vwx89AB = vmovl_s16(vget_low_s16(vhx89ABCDEF));
    const int32x4_t vwxCDEF = vmovl_s16(vget_high_s16(vhx89ABCDEF));
    const int32x4_t vwxGHIJ = vmovl_s16(vget_low_s16(vhxGHIJKLMN));
    const int32x4_t vwxKLMN = vmovl_s16(vget_high_s16(vhxGHIJKLMN));
    // Convert to float and apply the dequantization scale.
    float32x4_t vy0123 = vcvtq_f32_s32(vwx0123);
    float32x4_t vy4567 = vcvtq_f32_s32(vwx4567);
    float32x4_t vy89AB = vcvtq_f32_s32(vwx89AB);
    float32x4_t vyCDEF = vcvtq_f32_s32(vwxCDEF);
    float32x4_t vyGHIJ = vcvtq_f32_s32(vwxGHIJ);
    float32x4_t vyKLMN = vcvtq_f32_s32(vwxKLMN);
    vy0123 = vmulq_f32(vy0123, vscale);
    vy4567 = vmulq_f32(vy4567, vscale);
    vy89AB = vmulq_f32(vy89AB, vscale);
    vyCDEF = vmulq_f32(vyCDEF, vscale);
    vyGHIJ = vmulq_f32(vyGHIJ, vscale);
    vyKLMN = vmulq_f32(vyKLMN, vscale);
    vst1q_f32(output, vy0123); output += 4;
    vst1q_f32(output, vy4567); output += 4;
    vst1q_f32(output, vy89AB); output += 4;
    vst1q_f32(output, vyCDEF); output += 4;
    vst1q_f32(output, vyGHIJ); output += 4;
    vst1q_f32(output, vyKLMN); output += 4;
  }
  // Secondary loop: 8 elements at a time.
  for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
    const uint8x8_t vx = vld1_u8(input); input += 8;
    const int16x8_t vhx = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(vminus_zero_point), vx));
    const int32x4_t vwx_lo = vmovl_s16(vget_low_s16(vhx));
    const int32x4_t vwx_hi = vmovl_s16(vget_high_s16(vhx));
    float32x4_t vy_lo = vcvtq_f32_s32(vwx_lo);
    float32x4_t vy_hi = vcvtq_f32_s32(vwx_hi);
    vy_lo = vmulq_f32(vy_lo, vscale);
    vy_hi = vmulq_f32(vy_hi, vscale);
    vst1q_f32(output, vy_lo); output += 4;
    vst1q_f32(output, vy_hi); output += 4;
  }
  // Tail: 1-7 elements. The 8-byte load may read past the end of input (allowed
  // by XNN_OOB_READS); only valid lanes are stored.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(uint8_t));
    assert(batch <= 7 * sizeof(uint8_t));
    const uint8x8_t vx = vld1_u8(input);
    const int16x8_t vhx = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(vminus_zero_point), vx));
    const int32x4_t vwx_lo = vmovl_s16(vget_low_s16(vhx));
    const int32x4_t vwx_hi = vmovl_s16(vget_high_s16(vhx));
    float32x4_t vy = vcvtq_f32_s32(vwx_lo);
    vy = vmulq_f32(vy, vscale);
    if (batch & (4 * sizeof(uint8_t))) {
      vst1q_f32(output, vy); output += 4;
      vy = vcvtq_f32_s32(vwx_hi);
      vy = vmulq_f32(vy, vscale);
    }
    float32x2_t vy_lo = vget_low_f32(vy);
    if (batch & (2 * sizeof(uint8_t))) {
      vst1_f32(output, vy_lo); output += 2;
      vy_lo = vget_high_f32(vy);
    }
    if (batch & (1 * sizeof(uint8_t))) {
      vst1_lane_f32(output, vy_lo, 0);
    }
  }
}
| 4,240
| 35.878261
| 120
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-f32-vcvt/gen/qu8-f32-vcvt-neon-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
// Converts QU8 (asymmetrically quantized uint8) elements to FP32:
//   y = scale * ((int32_t) x - zero_point)
// NEON micro-kernel processing 32 elements per main-loop iteration.
void xnn_qu8_f32_vcvt_ukernel__neon_x32(
    size_t batch,
    const uint8_t* input,
    float* output,
    const union xnn_qu8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // minus_zero_point is the pre-negated zero point replicated across int16 lanes;
  // vaddw_u8 widens each uint8 and adds it, giving (x - zero_point) when
  // reinterpreted as signed.
  const int16x8_t vminus_zero_point = vreinterpretq_s16_u32(vld1q_dup_u32((const void*) params->neon.minus_zero_point));
  const float32x4_t vscale = vld1q_dup_f32(&params->neon.scale);
  for (; batch >= 32 * sizeof(uint8_t); batch -= 32 * sizeof(uint8_t)) {
    const uint8x8_t vx01234567 = vld1_u8(input); input += 8;
    const uint8x8_t vx89ABCDEF = vld1_u8(input); input += 8;
    const uint8x8_t vxGHIJKLMN = vld1_u8(input); input += 8;
    const uint8x8_t vxOPQRSTUV = vld1_u8(input); input += 8;
    // Widen to int16 while subtracting the zero point.
    const int16x8_t vhx01234567 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(vminus_zero_point), vx01234567));
    const int16x8_t vhx89ABCDEF = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(vminus_zero_point), vx89ABCDEF));
    const int16x8_t vhxGHIJKLMN = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(vminus_zero_point), vxGHIJKLMN));
    const int16x8_t vhxOPQRSTUV = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(vminus_zero_point), vxOPQRSTUV));
    // Widen int16 -> int32.
    const int32x4_t vwx0123 = vmovl_s16(vget_low_s16(vhx01234567));
    const int32x4_t vwx4567 = vmovl_s16(vget_high_s16(vhx01234567));
    const int32x4_t vwx89AB = vmovl_s16(vget_low_s16(vhx89ABCDEF));
    const int32x4_t vwxCDEF = vmovl_s16(vget_high_s16(vhx89ABCDEF));
    const int32x4_t vwxGHIJ = vmovl_s16(vget_low_s16(vhxGHIJKLMN));
    const int32x4_t vwxKLMN = vmovl_s16(vget_high_s16(vhxGHIJKLMN));
    const int32x4_t vwxOPQR = vmovl_s16(vget_low_s16(vhxOPQRSTUV));
    const int32x4_t vwxSTUV = vmovl_s16(vget_high_s16(vhxOPQRSTUV));
    // Convert to float and apply the dequantization scale.
    float32x4_t vy0123 = vcvtq_f32_s32(vwx0123);
    float32x4_t vy4567 = vcvtq_f32_s32(vwx4567);
    float32x4_t vy89AB = vcvtq_f32_s32(vwx89AB);
    float32x4_t vyCDEF = vcvtq_f32_s32(vwxCDEF);
    float32x4_t vyGHIJ = vcvtq_f32_s32(vwxGHIJ);
    float32x4_t vyKLMN = vcvtq_f32_s32(vwxKLMN);
    float32x4_t vyOPQR = vcvtq_f32_s32(vwxOPQR);
    float32x4_t vySTUV = vcvtq_f32_s32(vwxSTUV);
    vy0123 = vmulq_f32(vy0123, vscale);
    vy4567 = vmulq_f32(vy4567, vscale);
    vy89AB = vmulq_f32(vy89AB, vscale);
    vyCDEF = vmulq_f32(vyCDEF, vscale);
    vyGHIJ = vmulq_f32(vyGHIJ, vscale);
    vyKLMN = vmulq_f32(vyKLMN, vscale);
    vyOPQR = vmulq_f32(vyOPQR, vscale);
    vySTUV = vmulq_f32(vySTUV, vscale);
    vst1q_f32(output, vy0123); output += 4;
    vst1q_f32(output, vy4567); output += 4;
    vst1q_f32(output, vy89AB); output += 4;
    vst1q_f32(output, vyCDEF); output += 4;
    vst1q_f32(output, vyGHIJ); output += 4;
    vst1q_f32(output, vyKLMN); output += 4;
    vst1q_f32(output, vyOPQR); output += 4;
    vst1q_f32(output, vySTUV); output += 4;
  }
  // Secondary loop: 8 elements at a time.
  for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
    const uint8x8_t vx = vld1_u8(input); input += 8;
    const int16x8_t vhx = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(vminus_zero_point), vx));
    const int32x4_t vwx_lo = vmovl_s16(vget_low_s16(vhx));
    const int32x4_t vwx_hi = vmovl_s16(vget_high_s16(vhx));
    float32x4_t vy_lo = vcvtq_f32_s32(vwx_lo);
    float32x4_t vy_hi = vcvtq_f32_s32(vwx_hi);
    vy_lo = vmulq_f32(vy_lo, vscale);
    vy_hi = vmulq_f32(vy_hi, vscale);
    vst1q_f32(output, vy_lo); output += 4;
    vst1q_f32(output, vy_hi); output += 4;
  }
  // Tail: 1-7 elements. The 8-byte load may read past the end of input (allowed
  // by XNN_OOB_READS); only valid lanes are stored.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(uint8_t));
    assert(batch <= 7 * sizeof(uint8_t));
    const uint8x8_t vx = vld1_u8(input);
    const int16x8_t vhx = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(vminus_zero_point), vx));
    const int32x4_t vwx_lo = vmovl_s16(vget_low_s16(vhx));
    const int32x4_t vwx_hi = vmovl_s16(vget_high_s16(vhx));
    float32x4_t vy = vcvtq_f32_s32(vwx_lo);
    vy = vmulq_f32(vy, vscale);
    if (batch & (4 * sizeof(uint8_t))) {
      vst1q_f32(output, vy); output += 4;
      vy = vcvtq_f32_s32(vwx_hi);
      vy = vmulq_f32(vy, vscale);
    }
    float32x2_t vy_lo = vget_low_f32(vy);
    if (batch & (2 * sizeof(uint8_t))) {
      vst1_f32(output, vy_lo); output += 2;
      vy_lo = vget_high_f32(vy);
    }
    if (batch & (1 * sizeof(uint8_t))) {
      vst1_lane_f32(output, vy_lo, 0);
    }
  }
}
| 4,825
| 37.608
| 120
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-f32-vcvt/gen/qu8-f32-vcvt-neon-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
// Converts QU8 (asymmetrically quantized uint8) elements to FP32:
//   y = scale * ((int32_t) x - zero_point)
// NEON micro-kernel processing 8 elements per main-loop iteration.
void xnn_qu8_f32_vcvt_ukernel__neon_x8(
    size_t batch,
    const uint8_t* input,
    float* output,
    const union xnn_qu8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // minus_zero_point is the pre-negated zero point replicated across int16 lanes;
  // vaddw_u8 widens each uint8 and adds it, giving (x - zero_point) when
  // reinterpreted as signed.
  const int16x8_t vminus_zero_point = vreinterpretq_s16_u32(vld1q_dup_u32((const void*) params->neon.minus_zero_point));
  const float32x4_t vscale = vld1q_dup_f32(&params->neon.scale);
  for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
    const uint8x8_t vx = vld1_u8(input); input += 8;
    // Widen to int16 (subtracting zero point), then to int32, then convert and scale.
    const int16x8_t vhx = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(vminus_zero_point), vx));
    const int32x4_t vwx_lo = vmovl_s16(vget_low_s16(vhx));
    const int32x4_t vwx_hi = vmovl_s16(vget_high_s16(vhx));
    float32x4_t vy_lo = vcvtq_f32_s32(vwx_lo);
    float32x4_t vy_hi = vcvtq_f32_s32(vwx_hi);
    vy_lo = vmulq_f32(vy_lo, vscale);
    vy_hi = vmulq_f32(vy_hi, vscale);
    vst1q_f32(output, vy_lo); output += 4;
    vst1q_f32(output, vy_hi); output += 4;
  }
  // Tail: 1-7 elements. The 8-byte load may read past the end of input (allowed
  // by XNN_OOB_READS); only valid lanes are stored.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(uint8_t));
    assert(batch <= 7 * sizeof(uint8_t));
    const uint8x8_t vx = vld1_u8(input);
    const int16x8_t vhx = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(vminus_zero_point), vx));
    const int32x4_t vwx_lo = vmovl_s16(vget_low_s16(vhx));
    const int32x4_t vwx_hi = vmovl_s16(vget_high_s16(vhx));
    float32x4_t vy = vcvtq_f32_s32(vwx_lo);
    vy = vmulq_f32(vy, vscale);
    if (batch & (4 * sizeof(uint8_t))) {
      vst1q_f32(output, vy); output += 4;
      vy = vcvtq_f32_s32(vwx_hi);
      vy = vmulq_f32(vy, vscale);
    }
    float32x2_t vy_lo = vget_low_f32(vy);
    if (batch & (2 * sizeof(uint8_t))) {
      vst1_f32(output, vy_lo); output += 2;
      vy_lo = vget_high_f32(vy);
    }
    if (batch & (1 * sizeof(uint8_t))) {
      vst1_lane_f32(output, vy_lo, 0);
    }
  }
}
| 2,402
| 29.807692
| 120
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-f32-vcvt/gen/qu8-f32-vcvt-scalar-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
// Converts QU8 (asymmetrically quantized uint8) elements to FP32:
//   y = scale * ((int32_t) x - zero_point)
// Portable scalar micro-kernel, one element per iteration.
void xnn_qu8_f32_vcvt_ukernel__scalar_x1(
    size_t batch,
    const uint8_t* input,
    float* output,
    const union xnn_qu8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(uint8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  const int32_t zero_point = params->scalar.zero_point;
  const float scale = params->scalar.scale;
  // batch != 0 is asserted above, so the loop body always executes at least once.
  while (batch != 0) {
    const int32_t x = (int32_t) *input++ - zero_point;
    *output++ = (float) x * scale;
    batch -= sizeof(uint8_t);
  }
}
| 961
| 21.904762
| 76
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-f32-vcvt/gen/qu8-f32-vcvt-scalar-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
// Converts QU8 (asymmetrically quantized uint8) elements to FP32:
//   y = scale * ((int32_t) x - zero_point)
// Portable scalar micro-kernel, unrolled by two.
void xnn_qu8_f32_vcvt_ukernel__scalar_x2(
    size_t batch,
    const uint8_t* input,
    float* output,
    const union xnn_qu8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(uint8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  const int32_t zero_point = params->scalar.zero_point;
  const float scale = params->scalar.scale;
  // Main loop: two elements per iteration.
  for (; batch >= 2 * sizeof(uint8_t); batch -= 2 * sizeof(uint8_t)) {
    const int32_t x0 = (int32_t) input[0] - zero_point;
    const int32_t x1 = (int32_t) input[1] - zero_point;
    input += 2;
    output[0] = (float) x0 * scale;
    output[1] = (float) x1 * scale;
    output += 2;
  }
  // At most a single element can remain.
  if XNN_UNLIKELY(batch != 0) {
    const int32_t x = (int32_t) *input - zero_point;
    *output = (float) x * scale;
  }
}
| 1,305
| 21.517241
| 76
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-f32-vcvt/gen/qu8-f32-vcvt-scalar-x3.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
// Converts QU8 (asymmetrically quantized uint8) elements to FP32:
//   y = scale * ((int32_t) x - zero_point)
// Portable scalar micro-kernel, unrolled by three.
void xnn_qu8_f32_vcvt_ukernel__scalar_x3(
    size_t batch,
    const uint8_t* input,
    float* output,
    const union xnn_qu8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(uint8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  const int32_t zero_point = params->scalar.zero_point;
  const float scale = params->scalar.scale;
  // Main loop: three elements per iteration.
  for (; batch >= 3 * sizeof(uint8_t); batch -= 3 * sizeof(uint8_t)) {
    const int32_t x0 = (int32_t) input[0] - zero_point;
    const int32_t x1 = (int32_t) input[1] - zero_point;
    const int32_t x2 = (int32_t) input[2] - zero_point;
    input += 3;
    output[0] = (float) x0 * scale;
    output[1] = (float) x1 * scale;
    output[2] = (float) x2 * scale;
    output += 3;
  }
  // Tail of one or two elements.
  if XNN_UNLIKELY(batch != 0) {
    do {
      const int32_t x = (int32_t) *input++ - zero_point;
      *output++ = (float) x * scale;
      batch -= sizeof(uint8_t);
    } while (batch != 0);
  }
}
| 1,518
| 21.671642
| 76
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-f32-vcvt/gen/qu8-f32-vcvt-scalar-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
// Converts QU8 (asymmetrically quantized uint8) elements to FP32:
//   y = scale * ((int32_t) x - zero_point)
// Portable scalar micro-kernel, unrolled by four.
void xnn_qu8_f32_vcvt_ukernel__scalar_x4(
    size_t batch,
    const uint8_t* input,
    float* output,
    const union xnn_qu8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(uint8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  const int32_t zero_point = params->scalar.zero_point;
  const float scale = params->scalar.scale;
  // Main loop: four elements per iteration.
  for (; batch >= 4 * sizeof(uint8_t); batch -= 4 * sizeof(uint8_t)) {
    const int32_t x0 = (int32_t) input[0] - zero_point;
    const int32_t x1 = (int32_t) input[1] - zero_point;
    const int32_t x2 = (int32_t) input[2] - zero_point;
    const int32_t x3 = (int32_t) input[3] - zero_point;
    input += 4;
    output[0] = (float) x0 * scale;
    output[1] = (float) x1 * scale;
    output[2] = (float) x2 * scale;
    output[3] = (float) x3 * scale;
    output += 4;
  }
  // Tail of one to three elements.
  if XNN_UNLIKELY(batch != 0) {
    do {
      const int32_t x = (int32_t) *input++ - zero_point;
      *output++ = (float) x * scale;
      batch -= sizeof(uint8_t);
    } while (batch != 0);
  }
}
| 1,649
| 21.916667
| 76
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-f32-vcvt/gen/qu8-f32-vcvt-sse2-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/sse2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
// Converts QU8 (asymmetrically quantized uint8) elements to FP32 using the SSE2
// "magic number" technique: each zero-extended uint16 value is combined with
// magic_exp in the high half of a 32-bit word, producing an IEEE float equal to
// magic_bias + x; subtracting magic_bias then yields the dequantized integer as
// a float. NOTE(review): the zero point appears to be folded into magic_bias —
// confirm against the params initialization.
void xnn_qu8_f32_vcvt_ukernel__sse2_x16(
    size_t batch,
    const uint8_t* input,
    float* output,
    const union xnn_qu8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  const __m128i vmagic_exp = _mm_load_si128((const __m128i*) params->sse2.magic_exp);
  const __m128 vmagic_bias = _mm_load_ps(params->sse2.magic_bias);
  const __m128 vscale = _mm_load_ps(params->sse2.scale);
  const __m128i vzero = _mm_setzero_si128();
  // Main loop: 16 elements per iteration.
  for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
    __m128i vx01234567 = _mm_loadl_epi64((const __m128i*) input);
    __m128i vx89ABCDEF = _mm_loadl_epi64((const __m128i*) (input + 8));
    input += 16;
    // Zero-extend uint8 -> uint16.
    vx01234567 = _mm_unpacklo_epi8(vx01234567, vzero);
    vx89ABCDEF = _mm_unpacklo_epi8(vx89ABCDEF, vzero);
    // Interleave with magic_exp to build floats of value magic_bias + x.
    __m128 vy0123 = _mm_castsi128_ps(_mm_unpacklo_epi16(vx01234567, vmagic_exp));
    __m128 vy4567 = _mm_castsi128_ps(_mm_unpackhi_epi16(vx01234567, vmagic_exp));
    __m128 vy89AB = _mm_castsi128_ps(_mm_unpacklo_epi16(vx89ABCDEF, vmagic_exp));
    __m128 vyCDEF = _mm_castsi128_ps(_mm_unpackhi_epi16(vx89ABCDEF, vmagic_exp));
    vy0123 = _mm_sub_ps(vy0123, vmagic_bias);
    vy4567 = _mm_sub_ps(vy4567, vmagic_bias);
    vy89AB = _mm_sub_ps(vy89AB, vmagic_bias);
    vyCDEF = _mm_sub_ps(vyCDEF, vmagic_bias);
    vy0123 = _mm_mul_ps(vy0123, vscale);
    vy4567 = _mm_mul_ps(vy4567, vscale);
    vy89AB = _mm_mul_ps(vy89AB, vscale);
    vyCDEF = _mm_mul_ps(vyCDEF, vscale);
    _mm_storeu_ps(output, vy0123);
    _mm_storeu_ps(output + 4, vy4567);
    _mm_storeu_ps(output + 8, vy89AB);
    _mm_storeu_ps(output + 12, vyCDEF);
    output += 16;
  }
  // Secondary loop: 8 elements at a time.
  for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
    __m128i vx = _mm_loadl_epi64((const __m128i*) input);
    vx = _mm_unpacklo_epi8(vx, vzero);
    input += 8;
    __m128 vy_lo = _mm_castsi128_ps(_mm_unpacklo_epi16(vx, vmagic_exp));
    __m128 vy_hi = _mm_castsi128_ps(_mm_unpackhi_epi16(vx, vmagic_exp));
    vy_lo = _mm_sub_ps(vy_lo, vmagic_bias);
    vy_hi = _mm_sub_ps(vy_hi, vmagic_bias);
    vy_lo = _mm_mul_ps(vy_lo, vscale);
    vy_hi = _mm_mul_ps(vy_hi, vscale);
    _mm_storeu_ps(output, vy_lo);
    _mm_storeu_ps(output + 4, vy_hi);
    output += 8;
  }
  // Tail: 1-7 elements. The 8-byte load may read past the end of input (allowed
  // by XNN_OOB_READS); only valid elements are stored.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(uint8_t));
    assert(batch <= 7 * sizeof(uint8_t));
    __m128i vx = _mm_loadl_epi64((const __m128i*) input);
    vx = _mm_unpacklo_epi8(vx, vzero);
    __m128 vy = _mm_castsi128_ps(_mm_unpacklo_epi16(vx, vmagic_exp));
    vy = _mm_sub_ps(vy, vmagic_bias);
    vy = _mm_mul_ps(vy, vscale);
    if (batch & (4 * sizeof(uint8_t))) {
      _mm_storeu_ps(output, vy);
      vy = _mm_castsi128_ps(_mm_unpackhi_epi16(vx, vmagic_exp));
      vy = _mm_sub_ps(vy, vmagic_bias);
      vy = _mm_mul_ps(vy, vscale);
      output += 4;
    }
    if (batch & (2 * sizeof(uint8_t))) {
      _mm_storel_pi((__m64*) output, vy);
      vy = _mm_movehl_ps(vy, vy);
      output += 2;
    }
    if (batch & (1 * sizeof(uint8_t))) {
      _mm_store_ss(output, vy);
    }
  }
}
| 3,594
| 31.681818
| 90
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-f32-vcvt/gen/qu8-f32-vcvt-sse2-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/sse2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
// Converts QU8 (asymmetrically quantized uint8) elements to FP32 using the SSE2
// "magic number" technique: each zero-extended uint16 value is combined with
// magic_exp in the high half of a 32-bit word, producing an IEEE float equal to
// magic_bias + x; subtracting magic_bias then yields the dequantized integer as
// a float. NOTE(review): the zero point appears to be folded into magic_bias —
// confirm against the params initialization.
void xnn_qu8_f32_vcvt_ukernel__sse2_x24(
    size_t batch,
    const uint8_t* input,
    float* output,
    const union xnn_qu8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  const __m128i vmagic_exp = _mm_load_si128((const __m128i*) params->sse2.magic_exp);
  const __m128 vmagic_bias = _mm_load_ps(params->sse2.magic_bias);
  const __m128 vscale = _mm_load_ps(params->sse2.scale);
  const __m128i vzero = _mm_setzero_si128();
  // Main loop: 24 elements per iteration.
  for (; batch >= 24 * sizeof(uint8_t); batch -= 24 * sizeof(uint8_t)) {
    __m128i vx01234567 = _mm_loadl_epi64((const __m128i*) input);
    __m128i vx89ABCDEF = _mm_loadl_epi64((const __m128i*) (input + 8));
    __m128i vxGHIJKLMN = _mm_loadl_epi64((const __m128i*) (input + 16));
    input += 24;
    // Zero-extend uint8 -> uint16.
    vx01234567 = _mm_unpacklo_epi8(vx01234567, vzero);
    vx89ABCDEF = _mm_unpacklo_epi8(vx89ABCDEF, vzero);
    vxGHIJKLMN = _mm_unpacklo_epi8(vxGHIJKLMN, vzero);
    // Interleave with magic_exp to build floats of value magic_bias + x.
    __m128 vy0123 = _mm_castsi128_ps(_mm_unpacklo_epi16(vx01234567, vmagic_exp));
    __m128 vy4567 = _mm_castsi128_ps(_mm_unpackhi_epi16(vx01234567, vmagic_exp));
    __m128 vy89AB = _mm_castsi128_ps(_mm_unpacklo_epi16(vx89ABCDEF, vmagic_exp));
    __m128 vyCDEF = _mm_castsi128_ps(_mm_unpackhi_epi16(vx89ABCDEF, vmagic_exp));
    __m128 vyGHIJ = _mm_castsi128_ps(_mm_unpacklo_epi16(vxGHIJKLMN, vmagic_exp));
    __m128 vyKLMN = _mm_castsi128_ps(_mm_unpackhi_epi16(vxGHIJKLMN, vmagic_exp));
    vy0123 = _mm_sub_ps(vy0123, vmagic_bias);
    vy4567 = _mm_sub_ps(vy4567, vmagic_bias);
    vy89AB = _mm_sub_ps(vy89AB, vmagic_bias);
    vyCDEF = _mm_sub_ps(vyCDEF, vmagic_bias);
    vyGHIJ = _mm_sub_ps(vyGHIJ, vmagic_bias);
    vyKLMN = _mm_sub_ps(vyKLMN, vmagic_bias);
    vy0123 = _mm_mul_ps(vy0123, vscale);
    vy4567 = _mm_mul_ps(vy4567, vscale);
    vy89AB = _mm_mul_ps(vy89AB, vscale);
    vyCDEF = _mm_mul_ps(vyCDEF, vscale);
    vyGHIJ = _mm_mul_ps(vyGHIJ, vscale);
    vyKLMN = _mm_mul_ps(vyKLMN, vscale);
    _mm_storeu_ps(output, vy0123);
    _mm_storeu_ps(output + 4, vy4567);
    _mm_storeu_ps(output + 8, vy89AB);
    _mm_storeu_ps(output + 12, vyCDEF);
    _mm_storeu_ps(output + 16, vyGHIJ);
    _mm_storeu_ps(output + 20, vyKLMN);
    output += 24;
  }
  // Secondary loop: 8 elements at a time.
  for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
    __m128i vx = _mm_loadl_epi64((const __m128i*) input);
    vx = _mm_unpacklo_epi8(vx, vzero);
    input += 8;
    __m128 vy_lo = _mm_castsi128_ps(_mm_unpacklo_epi16(vx, vmagic_exp));
    __m128 vy_hi = _mm_castsi128_ps(_mm_unpackhi_epi16(vx, vmagic_exp));
    vy_lo = _mm_sub_ps(vy_lo, vmagic_bias);
    vy_hi = _mm_sub_ps(vy_hi, vmagic_bias);
    vy_lo = _mm_mul_ps(vy_lo, vscale);
    vy_hi = _mm_mul_ps(vy_hi, vscale);
    _mm_storeu_ps(output, vy_lo);
    _mm_storeu_ps(output + 4, vy_hi);
    output += 8;
  }
  // Tail: 1-7 elements. The 8-byte load may read past the end of input (allowed
  // by XNN_OOB_READS); only valid elements are stored.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(uint8_t));
    assert(batch <= 7 * sizeof(uint8_t));
    __m128i vx = _mm_loadl_epi64((const __m128i*) input);
    vx = _mm_unpacklo_epi8(vx, vzero);
    __m128 vy = _mm_castsi128_ps(_mm_unpacklo_epi16(vx, vmagic_exp));
    vy = _mm_sub_ps(vy, vmagic_bias);
    vy = _mm_mul_ps(vy, vscale);
    if (batch & (4 * sizeof(uint8_t))) {
      _mm_storeu_ps(output, vy);
      vy = _mm_castsi128_ps(_mm_unpackhi_epi16(vx, vmagic_exp));
      vy = _mm_sub_ps(vy, vmagic_bias);
      vy = _mm_mul_ps(vy, vscale);
      output += 4;
    }
    if (batch & (2 * sizeof(uint8_t))) {
      _mm_storel_pi((__m64*) output, vy);
      vy = _mm_movehl_ps(vy, vy);
      output += 2;
    }
    if (batch & (1 * sizeof(uint8_t))) {
      _mm_store_ss(output, vy);
    }
  }
}
| 4,140
| 33.508333
| 90
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-f32-vcvt/gen/qu8-f32-vcvt-sse2-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/sse2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
// Converts `batch` uint8-quantized elements to float32 with SSE2,
// 32 elements per main-loop iteration.
//
// Trick: each byte is zero-extended to 16 bits and paired with vmagic_exp as
// the high 16 bits of a 32-bit lane, producing a float equal to
// magic_bias + x; subtracting vmagic_bias recovers x as a float (the
// quantization zero point is presumably folded into magic_bias by the params
// initializer -- that code is outside this file, confirm there), which is
// then scaled by vscale.
//
// XNN_OOB_READS: the remainder path loads a full 8 bytes even when fewer
// remain, so the kernel may read past the end of `input`.
void xnn_qu8_f32_vcvt_ukernel__sse2_x32(
    size_t batch,
    const uint8_t* input,
    float* output,
    const union xnn_qu8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Load pre-packed conversion constants from the params struct.
  const __m128i vmagic_exp = _mm_load_si128((const __m128i*) params->sse2.magic_exp);
  const __m128 vmagic_bias = _mm_load_ps(params->sse2.magic_bias);
  const __m128 vscale = _mm_load_ps(params->sse2.scale);
  const __m128i vzero = _mm_setzero_si128();
  // Main loop: 32 bytes -> 32 floats per iteration.
  for (; batch >= 32 * sizeof(uint8_t); batch -= 32 * sizeof(uint8_t)) {
    __m128i vx01234567 = _mm_loadl_epi64((const __m128i*) input);
    __m128i vx89ABCDEF = _mm_loadl_epi64((const __m128i*) (input + 8));
    __m128i vxGHIJKLMN = _mm_loadl_epi64((const __m128i*) (input + 16));
    __m128i vxOPQRSTUV = _mm_loadl_epi64((const __m128i*) (input + 24));
    input += 32;
    // Zero-extend u8 -> u16 in each lane.
    vx01234567 = _mm_unpacklo_epi8(vx01234567, vzero);
    vx89ABCDEF = _mm_unpacklo_epi8(vx89ABCDEF, vzero);
    vxGHIJKLMN = _mm_unpacklo_epi8(vxGHIJKLMN, vzero);
    vxOPQRSTUV = _mm_unpacklo_epi8(vxOPQRSTUV, vzero);
    // Interleave each u16 with vmagic_exp to form floats = magic_bias + x.
    __m128 vy0123 = _mm_castsi128_ps(_mm_unpacklo_epi16(vx01234567, vmagic_exp));
    __m128 vy4567 = _mm_castsi128_ps(_mm_unpackhi_epi16(vx01234567, vmagic_exp));
    __m128 vy89AB = _mm_castsi128_ps(_mm_unpacklo_epi16(vx89ABCDEF, vmagic_exp));
    __m128 vyCDEF = _mm_castsi128_ps(_mm_unpackhi_epi16(vx89ABCDEF, vmagic_exp));
    __m128 vyGHIJ = _mm_castsi128_ps(_mm_unpacklo_epi16(vxGHIJKLMN, vmagic_exp));
    __m128 vyKLMN = _mm_castsi128_ps(_mm_unpackhi_epi16(vxGHIJKLMN, vmagic_exp));
    __m128 vyOPQR = _mm_castsi128_ps(_mm_unpacklo_epi16(vxOPQRSTUV, vmagic_exp));
    __m128 vySTUV = _mm_castsi128_ps(_mm_unpackhi_epi16(vxOPQRSTUV, vmagic_exp));
    vy0123 = _mm_sub_ps(vy0123, vmagic_bias);
    vy4567 = _mm_sub_ps(vy4567, vmagic_bias);
    vy89AB = _mm_sub_ps(vy89AB, vmagic_bias);
    vyCDEF = _mm_sub_ps(vyCDEF, vmagic_bias);
    vyGHIJ = _mm_sub_ps(vyGHIJ, vmagic_bias);
    vyKLMN = _mm_sub_ps(vyKLMN, vmagic_bias);
    vyOPQR = _mm_sub_ps(vyOPQR, vmagic_bias);
    vySTUV = _mm_sub_ps(vySTUV, vmagic_bias);
    vy0123 = _mm_mul_ps(vy0123, vscale);
    vy4567 = _mm_mul_ps(vy4567, vscale);
    vy89AB = _mm_mul_ps(vy89AB, vscale);
    vyCDEF = _mm_mul_ps(vyCDEF, vscale);
    vyGHIJ = _mm_mul_ps(vyGHIJ, vscale);
    vyKLMN = _mm_mul_ps(vyKLMN, vscale);
    vyOPQR = _mm_mul_ps(vyOPQR, vscale);
    vySTUV = _mm_mul_ps(vySTUV, vscale);
    _mm_storeu_ps(output, vy0123);
    _mm_storeu_ps(output + 4, vy4567);
    _mm_storeu_ps(output + 8, vy89AB);
    _mm_storeu_ps(output + 12, vyCDEF);
    _mm_storeu_ps(output + 16, vyGHIJ);
    _mm_storeu_ps(output + 20, vyKLMN);
    _mm_storeu_ps(output + 24, vyOPQR);
    _mm_storeu_ps(output + 28, vySTUV);
    output += 32;
  }
  // Secondary loop: 8 bytes -> 8 floats per iteration.
  for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
    __m128i vx = _mm_loadl_epi64((const __m128i*) input);
    vx = _mm_unpacklo_epi8(vx, vzero);
    input += 8;
    __m128 vy_lo = _mm_castsi128_ps(_mm_unpacklo_epi16(vx, vmagic_exp));
    __m128 vy_hi = _mm_castsi128_ps(_mm_unpackhi_epi16(vx, vmagic_exp));
    vy_lo = _mm_sub_ps(vy_lo, vmagic_bias);
    vy_hi = _mm_sub_ps(vy_hi, vmagic_bias);
    vy_lo = _mm_mul_ps(vy_lo, vscale);
    vy_hi = _mm_mul_ps(vy_hi, vscale);
    _mm_storeu_ps(output, vy_lo);
    _mm_storeu_ps(output + 4, vy_hi);
    output += 8;
  }
  // Remainder: 1-7 elements. Loads 8 bytes regardless (see XNN_OOB_READS),
  // then stores exactly `batch` floats in decreasing power-of-two chunks.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(uint8_t));
    assert(batch <= 7 * sizeof(uint8_t));
    __m128i vx = _mm_loadl_epi64((const __m128i*) input);
    vx = _mm_unpacklo_epi8(vx, vzero);
    __m128 vy = _mm_castsi128_ps(_mm_unpacklo_epi16(vx, vmagic_exp));
    vy = _mm_sub_ps(vy, vmagic_bias);
    vy = _mm_mul_ps(vy, vscale);
    if (batch & (4 * sizeof(uint8_t))) {
      _mm_storeu_ps(output, vy);
      // Recompute vy from the high four 16-bit lanes for the rest.
      vy = _mm_castsi128_ps(_mm_unpackhi_epi16(vx, vmagic_exp));
      vy = _mm_sub_ps(vy, vmagic_bias);
      vy = _mm_mul_ps(vy, vscale);
      output += 4;
    }
    if (batch & (2 * sizeof(uint8_t))) {
      _mm_storel_pi((__m64*) output, vy);
      vy = _mm_movehl_ps(vy, vy);
      output += 2;
    }
    if (batch & (1 * sizeof(uint8_t))) {
      _mm_store_ss(output, vy);
    }
  }
}
| 4,686
| 35.053846
| 90
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-f32-vcvt/gen/qu8-f32-vcvt-sse2-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/sse2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
// Converts `batch` uint8-quantized elements to float32 with SSE2,
// 8 elements per main-loop iteration.
//
// Trick: each byte is zero-extended to 16 bits and paired with vmagic_exp as
// the high 16 bits of a 32-bit lane, producing a float equal to
// magic_bias + x; subtracting vmagic_bias recovers x as a float (the
// quantization zero point is presumably folded into magic_bias by the params
// initializer -- that code is outside this file, confirm there), which is
// then scaled by vscale.
//
// XNN_OOB_READS: the remainder path loads a full 8 bytes even when fewer
// remain, so the kernel may read past the end of `input`.
void xnn_qu8_f32_vcvt_ukernel__sse2_x8(
    size_t batch,
    const uint8_t* input,
    float* output,
    const union xnn_qu8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Load pre-packed conversion constants from the params struct.
  const __m128i vmagic_exp = _mm_load_si128((const __m128i*) params->sse2.magic_exp);
  const __m128 vmagic_bias = _mm_load_ps(params->sse2.magic_bias);
  const __m128 vscale = _mm_load_ps(params->sse2.scale);
  const __m128i vzero = _mm_setzero_si128();
  // Main loop: 8 bytes -> 8 floats per iteration.
  for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
    __m128i vx = _mm_loadl_epi64((const __m128i*) input);
    vx = _mm_unpacklo_epi8(vx, vzero);  // zero-extend u8 -> u16
    input += 8;
    // Interleave each u16 with vmagic_exp to form floats = magic_bias + x.
    __m128 vy_lo = _mm_castsi128_ps(_mm_unpacklo_epi16(vx, vmagic_exp));
    __m128 vy_hi = _mm_castsi128_ps(_mm_unpackhi_epi16(vx, vmagic_exp));
    vy_lo = _mm_sub_ps(vy_lo, vmagic_bias);
    vy_hi = _mm_sub_ps(vy_hi, vmagic_bias);
    vy_lo = _mm_mul_ps(vy_lo, vscale);
    vy_hi = _mm_mul_ps(vy_hi, vscale);
    _mm_storeu_ps(output, vy_lo);
    _mm_storeu_ps(output + 4, vy_hi);
    output += 8;
  }
  // Remainder: 1-7 elements. Loads 8 bytes regardless (see XNN_OOB_READS),
  // then stores exactly `batch` floats in decreasing power-of-two chunks.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(uint8_t));
    assert(batch <= 7 * sizeof(uint8_t));
    __m128i vx = _mm_loadl_epi64((const __m128i*) input);
    vx = _mm_unpacklo_epi8(vx, vzero);
    __m128 vy = _mm_castsi128_ps(_mm_unpacklo_epi16(vx, vmagic_exp));
    vy = _mm_sub_ps(vy, vmagic_bias);
    vy = _mm_mul_ps(vy, vscale);
    if (batch & (4 * sizeof(uint8_t))) {
      _mm_storeu_ps(output, vy);
      // Recompute vy from the high four 16-bit lanes for the rest.
      vy = _mm_castsi128_ps(_mm_unpackhi_epi16(vx, vmagic_exp));
      vy = _mm_sub_ps(vy, vmagic_bias);
      vy = _mm_mul_ps(vy, vscale);
      output += 4;
    }
    if (batch & (2 * sizeof(uint8_t))) {
      _mm_storel_pi((__m64*) output, vy);
      vy = _mm_movehl_ps(vy, vy);
      output += 2;
    }
    if (batch & (1 * sizeof(uint8_t))) {
      _mm_store_ss(output, vy);
    }
  }
}
| 2,398
| 28.9875
| 90
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-f32-vcvt/gen/qu8-f32-vcvt-sse41-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/sse4.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>
// Converts `batch` uint8-quantized elements to float32 with SSE4.1,
// 16 elements per main-loop iteration.
//
// Per group of 4: zero-extend bytes to 32-bit lanes (_mm_cvtepu8_epi32),
// add vminus_zero_point (the negated zero point, per the field name),
// convert to float, and scale by vscale.
//
// XNN_OOB_READS: the remainder path loads a full 4 bytes even when only
// 1-3 remain, so the kernel may read past the end of `input`.
void xnn_qu8_f32_vcvt_ukernel__sse41_x16(
    size_t batch,
    const uint8_t* input,
    float* output,
    const union xnn_qu8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Load pre-packed conversion constants from the params struct.
  const __m128i vminus_zero_point = _mm_load_si128((const __m128i*) params->sse4.minus_zero_point);
  const __m128 vscale = _mm_load_ps(params->sse4.scale);
  // Main loop: 16 bytes -> 16 floats per iteration.
  for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
    __m128i vx0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input)));
    __m128i vx4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 4)));
    __m128i vx89AB = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 8)));
    __m128i vxCDEF = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 12)));
    input += 16;
    vx0123 = _mm_add_epi32(vx0123, vminus_zero_point);
    vx4567 = _mm_add_epi32(vx4567, vminus_zero_point);
    vx89AB = _mm_add_epi32(vx89AB, vminus_zero_point);
    vxCDEF = _mm_add_epi32(vxCDEF, vminus_zero_point);
    __m128 vy0123 = _mm_cvtepi32_ps(vx0123);
    __m128 vy4567 = _mm_cvtepi32_ps(vx4567);
    __m128 vy89AB = _mm_cvtepi32_ps(vx89AB);
    __m128 vyCDEF = _mm_cvtepi32_ps(vxCDEF);
    vy0123 = _mm_mul_ps(vy0123, vscale);
    vy4567 = _mm_mul_ps(vy4567, vscale);
    vy89AB = _mm_mul_ps(vy89AB, vscale);
    vyCDEF = _mm_mul_ps(vyCDEF, vscale);
    _mm_storeu_ps(output, vy0123);
    _mm_storeu_ps(output + 4, vy4567);
    _mm_storeu_ps(output + 8, vy89AB);
    _mm_storeu_ps(output + 12, vyCDEF);
    output += 16;
  }
  // Secondary loop: 4 bytes -> 4 floats per iteration.
  for (; batch >= 4 * sizeof(uint8_t); batch -= 4 * sizeof(uint8_t)) {
    __m128i vx = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input)));
    vx = _mm_add_epi32(vx, vminus_zero_point);
    input += 4;
    __m128 vy = _mm_cvtepi32_ps(vx);
    vy = _mm_mul_ps(vy, vscale);
    _mm_storeu_ps(output, vy);
    output += 4;
  }
  // Remainder: 1-3 elements. Loads 4 bytes regardless (see XNN_OOB_READS),
  // then stores exactly `batch` floats.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(uint8_t));
    assert(batch <= 3 * sizeof(uint8_t));
    __m128i vx = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input)));
    vx = _mm_add_epi32(vx, vminus_zero_point);
    __m128 vy = _mm_cvtepi32_ps(vx);
    vy = _mm_mul_ps(vy, vscale);
    if (batch & (2 * sizeof(uint8_t))) {
      _mm_storel_pi((__m64*) output, vy);
      vy = _mm_movehl_ps(vy, vy);  // shift lanes 2,3 down for the last store
      output += 2;
    }
    if (batch & (1 * sizeof(uint8_t))) {
      _mm_store_ss(output, vy);
    }
  }
}
| 2,987
| 31.835165
| 99
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-f32-vcvt/gen/qu8-f32-vcvt-sse41-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/sse4.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>
// Converts `batch` uint8-quantized elements to float32 with SSE4.1,
// 24 elements per main-loop iteration.
//
// Per group of 4: zero-extend bytes to 32-bit lanes (_mm_cvtepu8_epi32),
// add vminus_zero_point (the negated zero point, per the field name),
// convert to float, and scale by vscale.
//
// XNN_OOB_READS: the remainder path loads a full 4 bytes even when only
// 1-3 remain, so the kernel may read past the end of `input`.
void xnn_qu8_f32_vcvt_ukernel__sse41_x24(
    size_t batch,
    const uint8_t* input,
    float* output,
    const union xnn_qu8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Load pre-packed conversion constants from the params struct.
  const __m128i vminus_zero_point = _mm_load_si128((const __m128i*) params->sse4.minus_zero_point);
  const __m128 vscale = _mm_load_ps(params->sse4.scale);
  // Main loop: 24 bytes -> 24 floats per iteration.
  for (; batch >= 24 * sizeof(uint8_t); batch -= 24 * sizeof(uint8_t)) {
    __m128i vx0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input)));
    __m128i vx4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 4)));
    __m128i vx89AB = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 8)));
    __m128i vxCDEF = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 12)));
    __m128i vxGHIJ = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 16)));
    __m128i vxKLMN = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 20)));
    input += 24;
    vx0123 = _mm_add_epi32(vx0123, vminus_zero_point);
    vx4567 = _mm_add_epi32(vx4567, vminus_zero_point);
    vx89AB = _mm_add_epi32(vx89AB, vminus_zero_point);
    vxCDEF = _mm_add_epi32(vxCDEF, vminus_zero_point);
    vxGHIJ = _mm_add_epi32(vxGHIJ, vminus_zero_point);
    vxKLMN = _mm_add_epi32(vxKLMN, vminus_zero_point);
    __m128 vy0123 = _mm_cvtepi32_ps(vx0123);
    __m128 vy4567 = _mm_cvtepi32_ps(vx4567);
    __m128 vy89AB = _mm_cvtepi32_ps(vx89AB);
    __m128 vyCDEF = _mm_cvtepi32_ps(vxCDEF);
    __m128 vyGHIJ = _mm_cvtepi32_ps(vxGHIJ);
    __m128 vyKLMN = _mm_cvtepi32_ps(vxKLMN);
    vy0123 = _mm_mul_ps(vy0123, vscale);
    vy4567 = _mm_mul_ps(vy4567, vscale);
    vy89AB = _mm_mul_ps(vy89AB, vscale);
    vyCDEF = _mm_mul_ps(vyCDEF, vscale);
    vyGHIJ = _mm_mul_ps(vyGHIJ, vscale);
    vyKLMN = _mm_mul_ps(vyKLMN, vscale);
    _mm_storeu_ps(output, vy0123);
    _mm_storeu_ps(output + 4, vy4567);
    _mm_storeu_ps(output + 8, vy89AB);
    _mm_storeu_ps(output + 12, vyCDEF);
    _mm_storeu_ps(output + 16, vyGHIJ);
    _mm_storeu_ps(output + 20, vyKLMN);
    output += 24;
  }
  // Secondary loop: 4 bytes -> 4 floats per iteration.
  for (; batch >= 4 * sizeof(uint8_t); batch -= 4 * sizeof(uint8_t)) {
    __m128i vx = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input)));
    vx = _mm_add_epi32(vx, vminus_zero_point);
    input += 4;
    __m128 vy = _mm_cvtepi32_ps(vx);
    vy = _mm_mul_ps(vy, vscale);
    _mm_storeu_ps(output, vy);
    output += 4;
  }
  // Remainder: 1-3 elements. Loads 4 bytes regardless (see XNN_OOB_READS),
  // then stores exactly `batch` floats.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(uint8_t));
    assert(batch <= 3 * sizeof(uint8_t));
    __m128i vx = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input)));
    vx = _mm_add_epi32(vx, vminus_zero_point);
    __m128 vy = _mm_cvtepi32_ps(vx);
    vy = _mm_mul_ps(vy, vscale);
    if (batch & (2 * sizeof(uint8_t))) {
      _mm_storel_pi((__m64*) output, vy);
      vy = _mm_movehl_ps(vy, vy);  // shift lanes 2,3 down for the last store
      output += 2;
    }
    if (batch & (1 * sizeof(uint8_t))) {
      _mm_store_ss(output, vy);
    }
  }
}
| 3,543
| 34.089109
| 99
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-f32-vcvt/gen/qu8-f32-vcvt-sse41-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/sse4.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>
// Converts `batch` uint8-quantized elements to float32 with SSE4.1,
// 32 elements per main-loop iteration.
//
// Per group of 4: zero-extend bytes to 32-bit lanes (_mm_cvtepu8_epi32),
// add vminus_zero_point (the negated zero point, per the field name),
// convert to float, and scale by vscale.
//
// XNN_OOB_READS: the remainder path loads a full 4 bytes even when only
// 1-3 remain, so the kernel may read past the end of `input`.
void xnn_qu8_f32_vcvt_ukernel__sse41_x32(
    size_t batch,
    const uint8_t* input,
    float* output,
    const union xnn_qu8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Load pre-packed conversion constants from the params struct.
  const __m128i vminus_zero_point = _mm_load_si128((const __m128i*) params->sse4.minus_zero_point);
  const __m128 vscale = _mm_load_ps(params->sse4.scale);
  // Main loop: 32 bytes -> 32 floats per iteration.
  for (; batch >= 32 * sizeof(uint8_t); batch -= 32 * sizeof(uint8_t)) {
    __m128i vx0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input)));
    __m128i vx4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 4)));
    __m128i vx89AB = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 8)));
    __m128i vxCDEF = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 12)));
    __m128i vxGHIJ = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 16)));
    __m128i vxKLMN = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 20)));
    __m128i vxOPQR = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 24)));
    __m128i vxSTUV = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 28)));
    input += 32;
    vx0123 = _mm_add_epi32(vx0123, vminus_zero_point);
    vx4567 = _mm_add_epi32(vx4567, vminus_zero_point);
    vx89AB = _mm_add_epi32(vx89AB, vminus_zero_point);
    vxCDEF = _mm_add_epi32(vxCDEF, vminus_zero_point);
    vxGHIJ = _mm_add_epi32(vxGHIJ, vminus_zero_point);
    vxKLMN = _mm_add_epi32(vxKLMN, vminus_zero_point);
    vxOPQR = _mm_add_epi32(vxOPQR, vminus_zero_point);
    vxSTUV = _mm_add_epi32(vxSTUV, vminus_zero_point);
    __m128 vy0123 = _mm_cvtepi32_ps(vx0123);
    __m128 vy4567 = _mm_cvtepi32_ps(vx4567);
    __m128 vy89AB = _mm_cvtepi32_ps(vx89AB);
    __m128 vyCDEF = _mm_cvtepi32_ps(vxCDEF);
    __m128 vyGHIJ = _mm_cvtepi32_ps(vxGHIJ);
    __m128 vyKLMN = _mm_cvtepi32_ps(vxKLMN);
    __m128 vyOPQR = _mm_cvtepi32_ps(vxOPQR);
    __m128 vySTUV = _mm_cvtepi32_ps(vxSTUV);
    vy0123 = _mm_mul_ps(vy0123, vscale);
    vy4567 = _mm_mul_ps(vy4567, vscale);
    vy89AB = _mm_mul_ps(vy89AB, vscale);
    vyCDEF = _mm_mul_ps(vyCDEF, vscale);
    vyGHIJ = _mm_mul_ps(vyGHIJ, vscale);
    vyKLMN = _mm_mul_ps(vyKLMN, vscale);
    vyOPQR = _mm_mul_ps(vyOPQR, vscale);
    vySTUV = _mm_mul_ps(vySTUV, vscale);
    _mm_storeu_ps(output, vy0123);
    _mm_storeu_ps(output + 4, vy4567);
    _mm_storeu_ps(output + 8, vy89AB);
    _mm_storeu_ps(output + 12, vyCDEF);
    _mm_storeu_ps(output + 16, vyGHIJ);
    _mm_storeu_ps(output + 20, vyKLMN);
    _mm_storeu_ps(output + 24, vyOPQR);
    _mm_storeu_ps(output + 28, vySTUV);
    output += 32;
  }
  // Secondary loop: 4 bytes -> 4 floats per iteration.
  for (; batch >= 4 * sizeof(uint8_t); batch -= 4 * sizeof(uint8_t)) {
    __m128i vx = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input)));
    vx = _mm_add_epi32(vx, vminus_zero_point);
    input += 4;
    __m128 vy = _mm_cvtepi32_ps(vx);
    vy = _mm_mul_ps(vy, vscale);
    _mm_storeu_ps(output, vy);
    output += 4;
  }
  // Remainder: 1-3 elements. Loads 4 bytes regardless (see XNN_OOB_READS),
  // then stores exactly `batch` floats.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(uint8_t));
    assert(batch <= 3 * sizeof(uint8_t));
    __m128i vx = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input)));
    vx = _mm_add_epi32(vx, vminus_zero_point);
    __m128 vy = _mm_cvtepi32_ps(vx);
    vy = _mm_mul_ps(vy, vscale);
    if (batch & (2 * sizeof(uint8_t))) {
      _mm_storel_pi((__m64*) output, vy);
      vy = _mm_movehl_ps(vy, vy);  // shift lanes 2,3 down for the last store
      output += 2;
    }
    if (batch & (1 * sizeof(uint8_t))) {
      _mm_store_ss(output, vy);
    }
  }
}
| 4,099
| 35.936937
| 99
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-f32-vcvt/gen/qu8-f32-vcvt-sse41-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/sse4.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>
// Converts `batch` uint8-quantized elements to float32 with SSE4.1,
// 8 elements per main-loop iteration.
//
// Per group of 4: zero-extend bytes to 32-bit lanes (_mm_cvtepu8_epi32),
// add vminus_zero_point (the negated zero point, per the field name),
// convert to float, and scale by vscale.
//
// XNN_OOB_READS: the remainder path loads a full 4 bytes even when only
// 1-3 remain, so the kernel may read past the end of `input`.
void xnn_qu8_f32_vcvt_ukernel__sse41_x8(
    size_t batch,
    const uint8_t* input,
    float* output,
    const union xnn_qu8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Load pre-packed conversion constants from the params struct.
  const __m128i vminus_zero_point = _mm_load_si128((const __m128i*) params->sse4.minus_zero_point);
  const __m128 vscale = _mm_load_ps(params->sse4.scale);
  // Main loop: 8 bytes -> 8 floats per iteration.
  for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
    __m128i vx0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input)));
    __m128i vx4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 4)));
    input += 8;
    vx0123 = _mm_add_epi32(vx0123, vminus_zero_point);
    vx4567 = _mm_add_epi32(vx4567, vminus_zero_point);
    __m128 vy0123 = _mm_cvtepi32_ps(vx0123);
    __m128 vy4567 = _mm_cvtepi32_ps(vx4567);
    vy0123 = _mm_mul_ps(vy0123, vscale);
    vy4567 = _mm_mul_ps(vy4567, vscale);
    _mm_storeu_ps(output, vy0123);
    _mm_storeu_ps(output + 4, vy4567);
    output += 8;
  }
  // Secondary loop: 4 bytes -> 4 floats per iteration.
  for (; batch >= 4 * sizeof(uint8_t); batch -= 4 * sizeof(uint8_t)) {
    __m128i vx = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input)));
    vx = _mm_add_epi32(vx, vminus_zero_point);
    input += 4;
    __m128 vy = _mm_cvtepi32_ps(vx);
    vy = _mm_mul_ps(vy, vscale);
    _mm_storeu_ps(output, vy);
    output += 4;
  }
  // Remainder: 1-3 elements. Loads 4 bytes regardless (see XNN_OOB_READS),
  // then stores exactly `batch` floats.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(uint8_t));
    assert(batch <= 3 * sizeof(uint8_t));
    __m128i vx = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input)));
    vx = _mm_add_epi32(vx, vminus_zero_point);
    __m128 vy = _mm_cvtepi32_ps(vx);
    vy = _mm_mul_ps(vy, vscale);
    if (batch & (2 * sizeof(uint8_t))) {
      _mm_storel_pi((__m64*) output, vy);
      vy = _mm_movehl_ps(vy, vy);  // shift lanes 2,3 down for the last store
      output += 2;
    }
    if (batch & (1 * sizeof(uint8_t))) {
      _mm_store_ss(output, vy);
    }
  }
}
| 2,428
| 28.987654
| 99
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-f32-vcvt/gen/qu8-f32-vcvt-wasmsimd-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
// Converts `batch` uint8-quantized elements to float32 with WAsm SIMD128,
// 16 elements per main-loop iteration.
//
// Per group of 8: zero-extend bytes to 16-bit lanes (wasm_u16x8_load8x8),
// add vminus_zero_point in 16-bit lanes (results may go negative, hence the
// signed 32-bit extension next), sign-extend to 32-bit lanes, convert to
// float, and scale by vscale.
//
// XNN_OOB_READS: the remainder path loads a full 8 bytes even when fewer
// remain, so the kernel may read past the end of `input`.
void xnn_qu8_f32_vcvt_ukernel__wasmsimd_x16(
    size_t batch,
    const uint8_t* input,
    float* output,
    const union xnn_qu8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Splat pre-packed conversion constants from the params struct.
  const v128_t vminus_zero_point = wasm_v128_load64_splat(params->wasmsimd.minus_zero_point);
  const v128_t vscale = wasm_v128_load64_splat(params->wasmsimd.scale);
  // Main loop: 16 bytes -> 16 floats per iteration.
  for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
    v128_t vx01234567 = wasm_u16x8_load8x8(input);
    v128_t vx89ABCDEF = wasm_u16x8_load8x8(input + 8);
    input += 16;
    vx01234567 = wasm_i16x8_add(vx01234567, vminus_zero_point);
    vx89ABCDEF = wasm_i16x8_add(vx89ABCDEF, vminus_zero_point);
    v128_t vy0123 = wasm_i32x4_extend_low_i16x8(vx01234567);
    v128_t vy4567 = wasm_i32x4_extend_high_i16x8(vx01234567);
    v128_t vy89AB = wasm_i32x4_extend_low_i16x8(vx89ABCDEF);
    v128_t vyCDEF = wasm_i32x4_extend_high_i16x8(vx89ABCDEF);
    vy0123 = wasm_f32x4_convert_i32x4(vy0123);
    vy4567 = wasm_f32x4_convert_i32x4(vy4567);
    vy89AB = wasm_f32x4_convert_i32x4(vy89AB);
    vyCDEF = wasm_f32x4_convert_i32x4(vyCDEF);
    vy0123 = wasm_f32x4_mul(vy0123, vscale);
    vy4567 = wasm_f32x4_mul(vy4567, vscale);
    vy89AB = wasm_f32x4_mul(vy89AB, vscale);
    vyCDEF = wasm_f32x4_mul(vyCDEF, vscale);
    wasm_v128_store(output, vy0123);
    wasm_v128_store(output + 4, vy4567);
    wasm_v128_store(output + 8, vy89AB);
    wasm_v128_store(output + 12, vyCDEF);
    output += 16;
  }
  // Secondary loop: 8 bytes -> 8 floats per iteration.
  for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
    v128_t vx = wasm_u16x8_load8x8(input);
    vx = wasm_i16x8_add(vx, vminus_zero_point);
    input += 8;
    v128_t vy_lo = wasm_i32x4_extend_low_i16x8(vx);
    v128_t vy_hi = wasm_i32x4_extend_high_i16x8(vx);
    vy_lo = wasm_f32x4_convert_i32x4(vy_lo);
    vy_hi = wasm_f32x4_convert_i32x4(vy_hi);
    vy_lo = wasm_f32x4_mul(vy_lo, vscale);
    vy_hi = wasm_f32x4_mul(vy_hi, vscale);
    wasm_v128_store(output, vy_lo);
    wasm_v128_store(output + 4, vy_hi);
    output += 8;
  }
  // Remainder: 1-7 elements. Loads 8 bytes regardless (see XNN_OOB_READS),
  // then stores exactly `batch` floats in decreasing power-of-two chunks.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(uint8_t));
    assert(batch <= 7 * sizeof(uint8_t));
    v128_t vx = wasm_u16x8_load8x8(input);
    vx = wasm_i16x8_add(vx, vminus_zero_point);
    input += 8;
    v128_t vy = wasm_i32x4_extend_low_i16x8(vx);
    vy = wasm_f32x4_convert_i32x4(vy);
    vy = wasm_f32x4_mul(vy, vscale);
    if (batch & (4 * sizeof(uint8_t))) {
      wasm_v128_store(output, vy); output += 4;
      // Recompute vy from the high four 16-bit lanes for the rest.
      vy = wasm_i32x4_extend_high_i16x8(vx);
      vy = wasm_f32x4_convert_i32x4(vy);
      vy = wasm_f32x4_mul(vy, vscale);
    }
    if (batch & (2 * sizeof(uint8_t))) {
      wasm_v128_store64_lane(output, vy, 0);
      vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(uint8_t))) {
      wasm_v128_store32_lane(output, vy, 0);
    }
  }
}
| 3,416
| 30.934579
| 93
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-f32-vcvt/gen/qu8-f32-vcvt-wasmsimd-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
// Converts `batch` uint8-quantized elements to float32 with WAsm SIMD128,
// 24 elements per main-loop iteration.
//
// Per group of 8: zero-extend bytes to 16-bit lanes (wasm_u16x8_load8x8),
// add vminus_zero_point in 16-bit lanes (results may go negative, hence the
// signed 32-bit extension next), sign-extend to 32-bit lanes, convert to
// float, and scale by vscale.
//
// XNN_OOB_READS: the remainder path loads a full 8 bytes even when fewer
// remain, so the kernel may read past the end of `input`.
void xnn_qu8_f32_vcvt_ukernel__wasmsimd_x24(
    size_t batch,
    const uint8_t* input,
    float* output,
    const union xnn_qu8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Splat pre-packed conversion constants from the params struct.
  const v128_t vminus_zero_point = wasm_v128_load64_splat(params->wasmsimd.minus_zero_point);
  const v128_t vscale = wasm_v128_load64_splat(params->wasmsimd.scale);
  // Main loop: 24 bytes -> 24 floats per iteration.
  for (; batch >= 24 * sizeof(uint8_t); batch -= 24 * sizeof(uint8_t)) {
    v128_t vx01234567 = wasm_u16x8_load8x8(input);
    v128_t vx89ABCDEF = wasm_u16x8_load8x8(input + 8);
    v128_t vxGHIJKLMN = wasm_u16x8_load8x8(input + 16);
    input += 24;
    vx01234567 = wasm_i16x8_add(vx01234567, vminus_zero_point);
    vx89ABCDEF = wasm_i16x8_add(vx89ABCDEF, vminus_zero_point);
    vxGHIJKLMN = wasm_i16x8_add(vxGHIJKLMN, vminus_zero_point);
    v128_t vy0123 = wasm_i32x4_extend_low_i16x8(vx01234567);
    v128_t vy4567 = wasm_i32x4_extend_high_i16x8(vx01234567);
    v128_t vy89AB = wasm_i32x4_extend_low_i16x8(vx89ABCDEF);
    v128_t vyCDEF = wasm_i32x4_extend_high_i16x8(vx89ABCDEF);
    v128_t vyGHIJ = wasm_i32x4_extend_low_i16x8(vxGHIJKLMN);
    v128_t vyKLMN = wasm_i32x4_extend_high_i16x8(vxGHIJKLMN);
    vy0123 = wasm_f32x4_convert_i32x4(vy0123);
    vy4567 = wasm_f32x4_convert_i32x4(vy4567);
    vy89AB = wasm_f32x4_convert_i32x4(vy89AB);
    vyCDEF = wasm_f32x4_convert_i32x4(vyCDEF);
    vyGHIJ = wasm_f32x4_convert_i32x4(vyGHIJ);
    vyKLMN = wasm_f32x4_convert_i32x4(vyKLMN);
    vy0123 = wasm_f32x4_mul(vy0123, vscale);
    vy4567 = wasm_f32x4_mul(vy4567, vscale);
    vy89AB = wasm_f32x4_mul(vy89AB, vscale);
    vyCDEF = wasm_f32x4_mul(vyCDEF, vscale);
    vyGHIJ = wasm_f32x4_mul(vyGHIJ, vscale);
    vyKLMN = wasm_f32x4_mul(vyKLMN, vscale);
    wasm_v128_store(output, vy0123);
    wasm_v128_store(output + 4, vy4567);
    wasm_v128_store(output + 8, vy89AB);
    wasm_v128_store(output + 12, vyCDEF);
    wasm_v128_store(output + 16, vyGHIJ);
    wasm_v128_store(output + 20, vyKLMN);
    output += 24;
  }
  // Secondary loop: 8 bytes -> 8 floats per iteration.
  for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
    v128_t vx = wasm_u16x8_load8x8(input);
    vx = wasm_i16x8_add(vx, vminus_zero_point);
    input += 8;
    v128_t vy_lo = wasm_i32x4_extend_low_i16x8(vx);
    v128_t vy_hi = wasm_i32x4_extend_high_i16x8(vx);
    vy_lo = wasm_f32x4_convert_i32x4(vy_lo);
    vy_hi = wasm_f32x4_convert_i32x4(vy_hi);
    vy_lo = wasm_f32x4_mul(vy_lo, vscale);
    vy_hi = wasm_f32x4_mul(vy_hi, vscale);
    wasm_v128_store(output, vy_lo);
    wasm_v128_store(output + 4, vy_hi);
    output += 8;
  }
  // Remainder: 1-7 elements. Loads 8 bytes regardless (see XNN_OOB_READS),
  // then stores exactly `batch` floats in decreasing power-of-two chunks.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(uint8_t));
    assert(batch <= 7 * sizeof(uint8_t));
    v128_t vx = wasm_u16x8_load8x8(input);
    vx = wasm_i16x8_add(vx, vminus_zero_point);
    input += 8;
    v128_t vy = wasm_i32x4_extend_low_i16x8(vx);
    vy = wasm_f32x4_convert_i32x4(vy);
    vy = wasm_f32x4_mul(vy, vscale);
    if (batch & (4 * sizeof(uint8_t))) {
      wasm_v128_store(output, vy); output += 4;
      // Recompute vy from the high four 16-bit lanes for the rest.
      vy = wasm_i32x4_extend_high_i16x8(vx);
      vy = wasm_f32x4_convert_i32x4(vy);
      vy = wasm_f32x4_mul(vy, vscale);
    }
    if (batch & (2 * sizeof(uint8_t))) {
      wasm_v128_store64_lane(output, vy, 0);
      vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(uint8_t))) {
      wasm_v128_store32_lane(output, vy, 0);
    }
  }
}
| 3,927
| 32.57265
| 93
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-f32-vcvt/gen/qu8-f32-vcvt-wasmsimd-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
// Converts `batch` uint8-quantized elements to float32 with WAsm SIMD128,
// 32 elements per main-loop iteration.
//
// Per group of 8: zero-extend bytes to 16-bit lanes (wasm_u16x8_load8x8),
// add vminus_zero_point in 16-bit lanes (results may go negative, hence the
// signed 32-bit extension next), sign-extend to 32-bit lanes, convert to
// float, and scale by vscale.
//
// XNN_OOB_READS: the remainder path loads a full 8 bytes even when fewer
// remain, so the kernel may read past the end of `input`.
void xnn_qu8_f32_vcvt_ukernel__wasmsimd_x32(
    size_t batch,
    const uint8_t* input,
    float* output,
    const union xnn_qu8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Splat pre-packed conversion constants from the params struct.
  const v128_t vminus_zero_point = wasm_v128_load64_splat(params->wasmsimd.minus_zero_point);
  const v128_t vscale = wasm_v128_load64_splat(params->wasmsimd.scale);
  // Main loop: 32 bytes -> 32 floats per iteration.
  for (; batch >= 32 * sizeof(uint8_t); batch -= 32 * sizeof(uint8_t)) {
    v128_t vx01234567 = wasm_u16x8_load8x8(input);
    v128_t vx89ABCDEF = wasm_u16x8_load8x8(input + 8);
    v128_t vxGHIJKLMN = wasm_u16x8_load8x8(input + 16);
    v128_t vxOPQRSTUV = wasm_u16x8_load8x8(input + 24);
    input += 32;
    vx01234567 = wasm_i16x8_add(vx01234567, vminus_zero_point);
    vx89ABCDEF = wasm_i16x8_add(vx89ABCDEF, vminus_zero_point);
    vxGHIJKLMN = wasm_i16x8_add(vxGHIJKLMN, vminus_zero_point);
    vxOPQRSTUV = wasm_i16x8_add(vxOPQRSTUV, vminus_zero_point);
    v128_t vy0123 = wasm_i32x4_extend_low_i16x8(vx01234567);
    v128_t vy4567 = wasm_i32x4_extend_high_i16x8(vx01234567);
    v128_t vy89AB = wasm_i32x4_extend_low_i16x8(vx89ABCDEF);
    v128_t vyCDEF = wasm_i32x4_extend_high_i16x8(vx89ABCDEF);
    v128_t vyGHIJ = wasm_i32x4_extend_low_i16x8(vxGHIJKLMN);
    v128_t vyKLMN = wasm_i32x4_extend_high_i16x8(vxGHIJKLMN);
    v128_t vyOPQR = wasm_i32x4_extend_low_i16x8(vxOPQRSTUV);
    v128_t vySTUV = wasm_i32x4_extend_high_i16x8(vxOPQRSTUV);
    vy0123 = wasm_f32x4_convert_i32x4(vy0123);
    vy4567 = wasm_f32x4_convert_i32x4(vy4567);
    vy89AB = wasm_f32x4_convert_i32x4(vy89AB);
    vyCDEF = wasm_f32x4_convert_i32x4(vyCDEF);
    vyGHIJ = wasm_f32x4_convert_i32x4(vyGHIJ);
    vyKLMN = wasm_f32x4_convert_i32x4(vyKLMN);
    vyOPQR = wasm_f32x4_convert_i32x4(vyOPQR);
    vySTUV = wasm_f32x4_convert_i32x4(vySTUV);
    vy0123 = wasm_f32x4_mul(vy0123, vscale);
    vy4567 = wasm_f32x4_mul(vy4567, vscale);
    vy89AB = wasm_f32x4_mul(vy89AB, vscale);
    vyCDEF = wasm_f32x4_mul(vyCDEF, vscale);
    vyGHIJ = wasm_f32x4_mul(vyGHIJ, vscale);
    vyKLMN = wasm_f32x4_mul(vyKLMN, vscale);
    vyOPQR = wasm_f32x4_mul(vyOPQR, vscale);
    vySTUV = wasm_f32x4_mul(vySTUV, vscale);
    wasm_v128_store(output, vy0123);
    wasm_v128_store(output + 4, vy4567);
    wasm_v128_store(output + 8, vy89AB);
    wasm_v128_store(output + 12, vyCDEF);
    wasm_v128_store(output + 16, vyGHIJ);
    wasm_v128_store(output + 20, vyKLMN);
    wasm_v128_store(output + 24, vyOPQR);
    wasm_v128_store(output + 28, vySTUV);
    output += 32;
  }
  // Secondary loop: 8 bytes -> 8 floats per iteration.
  for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
    v128_t vx = wasm_u16x8_load8x8(input);
    vx = wasm_i16x8_add(vx, vminus_zero_point);
    input += 8;
    v128_t vy_lo = wasm_i32x4_extend_low_i16x8(vx);
    v128_t vy_hi = wasm_i32x4_extend_high_i16x8(vx);
    vy_lo = wasm_f32x4_convert_i32x4(vy_lo);
    vy_hi = wasm_f32x4_convert_i32x4(vy_hi);
    vy_lo = wasm_f32x4_mul(vy_lo, vscale);
    vy_hi = wasm_f32x4_mul(vy_hi, vscale);
    wasm_v128_store(output, vy_lo);
    wasm_v128_store(output + 4, vy_hi);
    output += 8;
  }
  // Remainder: 1-7 elements. Loads 8 bytes regardless (see XNN_OOB_READS),
  // then stores exactly `batch` floats in decreasing power-of-two chunks.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(uint8_t));
    assert(batch <= 7 * sizeof(uint8_t));
    v128_t vx = wasm_u16x8_load8x8(input);
    vx = wasm_i16x8_add(vx, vminus_zero_point);
    input += 8;
    v128_t vy = wasm_i32x4_extend_low_i16x8(vx);
    vy = wasm_f32x4_convert_i32x4(vy);
    vy = wasm_f32x4_mul(vy, vscale);
    if (batch & (4 * sizeof(uint8_t))) {
      wasm_v128_store(output, vy); output += 4;
      // Recompute vy from the high four 16-bit lanes for the rest.
      vy = wasm_i32x4_extend_high_i16x8(vx);
      vy = wasm_f32x4_convert_i32x4(vy);
      vy = wasm_f32x4_mul(vy, vscale);
    }
    if (batch & (2 * sizeof(uint8_t))) {
      wasm_v128_store64_lane(output, vy, 0);
      vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(uint8_t))) {
      wasm_v128_store32_lane(output, vy, 0);
    }
  }
}
| 4,438
| 33.952756
| 93
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-f32-vcvt/gen/qu8-f32-vcvt-wasmsimd-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
// Converts `batch` uint8-quantized elements to float32 with WAsm SIMD128,
// 8 elements per main-loop iteration.
//
// Per group of 8: zero-extend bytes to 16-bit lanes (wasm_u16x8_load8x8),
// add vminus_zero_point in 16-bit lanes (results may go negative, hence the
// signed 32-bit extension next), sign-extend to 32-bit lanes, convert to
// float, and scale by vscale.
//
// XNN_OOB_READS: the remainder path loads a full 8 bytes even when fewer
// remain, so the kernel may read past the end of `input`.
void xnn_qu8_f32_vcvt_ukernel__wasmsimd_x8(
    size_t batch,
    const uint8_t* input,
    float* output,
    const union xnn_qu8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint8_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Splat pre-packed conversion constants from the params struct.
  const v128_t vminus_zero_point = wasm_v128_load64_splat(params->wasmsimd.minus_zero_point);
  const v128_t vscale = wasm_v128_load64_splat(params->wasmsimd.scale);
  // Main loop: 8 bytes -> 8 floats per iteration.
  for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
    v128_t vx = wasm_u16x8_load8x8(input);
    vx = wasm_i16x8_add(vx, vminus_zero_point);
    input += 8;
    v128_t vy_lo = wasm_i32x4_extend_low_i16x8(vx);
    v128_t vy_hi = wasm_i32x4_extend_high_i16x8(vx);
    vy_lo = wasm_f32x4_convert_i32x4(vy_lo);
    vy_hi = wasm_f32x4_convert_i32x4(vy_hi);
    vy_lo = wasm_f32x4_mul(vy_lo, vscale);
    vy_hi = wasm_f32x4_mul(vy_hi, vscale);
    wasm_v128_store(output, vy_lo);
    wasm_v128_store(output + 4, vy_hi);
    output += 8;
  }
  // Remainder: 1-7 elements. Loads 8 bytes regardless (see XNN_OOB_READS),
  // then stores exactly `batch` floats in decreasing power-of-two chunks.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(uint8_t));
    assert(batch <= 7 * sizeof(uint8_t));
    v128_t vx = wasm_u16x8_load8x8(input);
    vx = wasm_i16x8_add(vx, vminus_zero_point);
    input += 8;
    v128_t vy = wasm_i32x4_extend_low_i16x8(vx);
    vy = wasm_f32x4_convert_i32x4(vy);
    vy = wasm_f32x4_mul(vy, vscale);
    if (batch & (4 * sizeof(uint8_t))) {
      wasm_v128_store(output, vy); output += 4;
      // Recompute vy from the high four 16-bit lanes for the rest.
      vy = wasm_i32x4_extend_high_i16x8(vx);
      vy = wasm_f32x4_convert_i32x4(vy);
      vy = wasm_f32x4_mul(vy, vscale);
    }
    if (batch & (2 * sizeof(uint8_t))) {
      wasm_v128_store64_lane(output, vy, 0);
      vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(uint8_t))) {
      wasm_v128_store32_lane(output, vy, 0);
    }
  }
}
| 2,289
| 28.358974
| 93
|
c
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.