repo
stringlengths 1
152
⌀ | file
stringlengths 14
221
| code
stringlengths 501
25k
| file_length
int64 501
25k
| avg_line_length
float64 20
99.5
| max_line_length
int64 21
134
| extension_type
stringclasses 2
values |
|---|---|---|---|---|---|---|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8dwconv/up8x9-sse2.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <immintrin.h>
#include <qnnpack/q8dwconv.h>
#include <requantization/runtime-sse2.h>
/*
 * Quantized (uint8) depthwise-convolution micro-kernel, SSE2, "up8x9" variant:
 * 9 kernel taps (e.g. a 3x3 window), processing 8 channels per vector
 * iteration ("up" = unit height-pitch / no multi-pass accumulation).
 *
 * Parameters:
 *   channels          - number of channels per output pixel.
 *   output_width      - number of output pixels to produce.
 *   input             - indirection buffer: for each output pixel, 9 pointers
 *                       to the input rows/pixels covered by the window.
 *   weights           - packed weights: per 8-channel group, 32 bytes of
 *                       int32 bias (8 x int32) followed by 9 x 8 uint8 kernel
 *                       values = 104 bytes per group.
 *   output            - uint8 output pixels, `channels` values per pixel.
 *   input_stride      - byte stride between consecutive groups of 9 input
 *                       pointers in the indirection buffer.
 *   output_increment  - byte adjustment applied to `output` after each pixel.
 *   quantization_params - per-tensor zero points, requantization scale, and
 *                       output clamping bounds (project-defined union;
 *                       SSE2 member used here).
 */
void pytorch_q8dwconv_ukernel_up8x9__sse2(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    size_t input_stride,
    size_t output_increment,
    const union pytorch_qnnp_conv_quantization_params
        quantization_params[RESTRICT_STATIC 1]) {
  /* Input zero point replicated across lanes (pre-packed in the params). */
  const __m128i va_zero_point = _mm_load_si128(
      (const __m128i*)quantization_params->sse2.input_zero_point);
  /* Per-tensor kernel zero point: only entry [0] is used (no per-channel zp
   * in this kernel). */
  const __m128i vkernel_zero_point = _mm_set1_epi16(
      quantization_params->sse2.kernel_zero_points[0]);
  const __m128i vzero = _mm_setzero_si128();
  do {
    /* The 9 taps' input pointers for this output pixel. */
    const uint8_t* i0 = input[0];
    const uint8_t* i1 = input[1];
    const uint8_t* i2 = input[2];
    const uint8_t* i3 = input[3];
    const uint8_t* i4 = input[4];
    const uint8_t* i5 = input[5];
    const uint8_t* i6 = input[6];
    const uint8_t* i7 = input[7];
    const uint8_t* i8 = input[8];
    /* Advance the indirection buffer by a byte stride (not an element
     * count), hence the uintptr_t arithmetic. */
    input = (const uint8_t**)((uintptr_t)input + input_stride);

    size_t c = channels;
    const void* w = weights;
    /* Main loop: full groups of 8 channels. */
    for (; c >= 8; c -= 8) {
      /* Seed accumulators with the 8 int32 biases (first 32 bytes of the
       * weight group). */
      __m128i vacc_lo = _mm_loadu_si128((const __m128i*)w);
      __m128i vacc_hi = _mm_loadu_si128((const __m128i*)((uintptr_t)w + 16));

      /* Tap 0: load 8 input bytes, widen to int16 and subtract the input
       * zero point; load 8 kernel bytes (at w+32) and subtract the kernel
       * zero point. mullo/mulhi give the low/high 16 bits of each 16x16
       * product; interleaving them with unpacklo/unpackhi reassembles the
       * eight full 32-bit products, which are added into the accumulators. */
      const __m128i vi0 = _mm_loadl_epi64((const __m128i*)i0);
      i0 += 8;
      const __m128i vxi0 =
          sub_zero_point(_mm_unpacklo_epi8(vi0, vzero), va_zero_point);
      const __m128i vk0 = _mm_loadl_epi64((const __m128i*)((uintptr_t)w + 32));
      const __m128i vxk0 =
          _mm_sub_epi16(_mm_unpacklo_epi8(vk0, vzero), vkernel_zero_point);
      const __m128i vprod0_odd = _mm_mullo_epi16(vxi0, vxk0);
      const __m128i vprod0_even = _mm_mulhi_epi16(vxi0, vxk0);
      vacc_lo =
          _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod0_odd, vprod0_even));
      vacc_hi =
          _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod0_odd, vprod0_even));

      /* Taps 1..8: identical to tap 0, each reading its kernel row 8 bytes
       * further into the weight group (w+40 .. w+96). */
      const __m128i vi1 = _mm_loadl_epi64((const __m128i*)i1);
      i1 += 8;
      const __m128i vxi1 =
          sub_zero_point(_mm_unpacklo_epi8(vi1, vzero), va_zero_point);
      const __m128i vk1 = _mm_loadl_epi64((const __m128i*)((uintptr_t)w + 40));
      const __m128i vxk1 =
          _mm_sub_epi16(_mm_unpacklo_epi8(vk1, vzero), vkernel_zero_point);
      const __m128i vprod1_odd = _mm_mullo_epi16(vxi1, vxk1);
      const __m128i vprod1_even = _mm_mulhi_epi16(vxi1, vxk1);
      vacc_lo =
          _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod1_odd, vprod1_even));
      vacc_hi =
          _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod1_odd, vprod1_even));

      const __m128i vi2 = _mm_loadl_epi64((const __m128i*)i2);
      i2 += 8;
      const __m128i vxi2 =
          sub_zero_point(_mm_unpacklo_epi8(vi2, vzero), va_zero_point);
      const __m128i vk2 = _mm_loadl_epi64((const __m128i*)((uintptr_t)w + 48));
      const __m128i vxk2 =
          _mm_sub_epi16(_mm_unpacklo_epi8(vk2, vzero), vkernel_zero_point);
      const __m128i vprod2_odd = _mm_mullo_epi16(vxi2, vxk2);
      const __m128i vprod2_even = _mm_mulhi_epi16(vxi2, vxk2);
      vacc_lo =
          _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod2_odd, vprod2_even));
      vacc_hi =
          _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod2_odd, vprod2_even));

      const __m128i vi3 = _mm_loadl_epi64((const __m128i*)i3);
      i3 += 8;
      const __m128i vxi3 =
          sub_zero_point(_mm_unpacklo_epi8(vi3, vzero), va_zero_point);
      const __m128i vk3 = _mm_loadl_epi64((const __m128i*)((uintptr_t)w + 56));
      const __m128i vxk3 =
          _mm_sub_epi16(_mm_unpacklo_epi8(vk3, vzero), vkernel_zero_point);
      const __m128i vprod3_odd = _mm_mullo_epi16(vxi3, vxk3);
      const __m128i vprod3_even = _mm_mulhi_epi16(vxi3, vxk3);
      vacc_lo =
          _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod3_odd, vprod3_even));
      vacc_hi =
          _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod3_odd, vprod3_even));

      const __m128i vi4 = _mm_loadl_epi64((const __m128i*)i4);
      i4 += 8;
      const __m128i vxi4 =
          sub_zero_point(_mm_unpacklo_epi8(vi4, vzero), va_zero_point);
      const __m128i vk4 = _mm_loadl_epi64((const __m128i*)((uintptr_t)w + 64));
      const __m128i vxk4 =
          _mm_sub_epi16(_mm_unpacklo_epi8(vk4, vzero), vkernel_zero_point);
      const __m128i vprod4_odd = _mm_mullo_epi16(vxi4, vxk4);
      const __m128i vprod4_even = _mm_mulhi_epi16(vxi4, vxk4);
      vacc_lo =
          _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod4_odd, vprod4_even));
      vacc_hi =
          _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod4_odd, vprod4_even));

      const __m128i vi5 = _mm_loadl_epi64((const __m128i*)i5);
      i5 += 8;
      const __m128i vxi5 =
          sub_zero_point(_mm_unpacklo_epi8(vi5, vzero), va_zero_point);
      const __m128i vk5 = _mm_loadl_epi64((const __m128i*)((uintptr_t)w + 72));
      const __m128i vxk5 =
          _mm_sub_epi16(_mm_unpacklo_epi8(vk5, vzero), vkernel_zero_point);
      const __m128i vprod5_odd = _mm_mullo_epi16(vxi5, vxk5);
      const __m128i vprod5_even = _mm_mulhi_epi16(vxi5, vxk5);
      vacc_lo =
          _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod5_odd, vprod5_even));
      vacc_hi =
          _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod5_odd, vprod5_even));

      const __m128i vi6 = _mm_loadl_epi64((const __m128i*)i6);
      i6 += 8;
      const __m128i vxi6 =
          sub_zero_point(_mm_unpacklo_epi8(vi6, vzero), va_zero_point);
      const __m128i vk6 = _mm_loadl_epi64((const __m128i*)((uintptr_t)w + 80));
      const __m128i vxk6 =
          _mm_sub_epi16(_mm_unpacklo_epi8(vk6, vzero), vkernel_zero_point);
      const __m128i vprod6_odd = _mm_mullo_epi16(vxi6, vxk6);
      const __m128i vprod6_even = _mm_mulhi_epi16(vxi6, vxk6);
      vacc_lo =
          _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod6_odd, vprod6_even));
      vacc_hi =
          _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod6_odd, vprod6_even));

      const __m128i vi7 = _mm_loadl_epi64((const __m128i*)i7);
      i7 += 8;
      const __m128i vxi7 =
          sub_zero_point(_mm_unpacklo_epi8(vi7, vzero), va_zero_point);
      const __m128i vk7 = _mm_loadl_epi64((const __m128i*)((uintptr_t)w + 88));
      const __m128i vxk7 =
          _mm_sub_epi16(_mm_unpacklo_epi8(vk7, vzero), vkernel_zero_point);
      const __m128i vprod7_odd = _mm_mullo_epi16(vxi7, vxk7);
      const __m128i vprod7_even = _mm_mulhi_epi16(vxi7, vxk7);
      vacc_lo =
          _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod7_odd, vprod7_even));
      vacc_hi =
          _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod7_odd, vprod7_even));

      const __m128i vi8 = _mm_loadl_epi64((const __m128i*)i8);
      i8 += 8;
      const __m128i vxi8 =
          sub_zero_point(_mm_unpacklo_epi8(vi8, vzero), va_zero_point);
      const __m128i vk8 = _mm_loadl_epi64((const __m128i*)((uintptr_t)w + 96));
      const __m128i vxk8 =
          _mm_sub_epi16(_mm_unpacklo_epi8(vk8, vzero), vkernel_zero_point);
      const __m128i vprod8_odd = _mm_mullo_epi16(vxi8, vxk8);
      const __m128i vprod8_even = _mm_mulhi_epi16(vxi8, vxk8);
      vacc_lo =
          _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod8_odd, vprod8_even));
      vacc_hi =
          _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod8_odd, vprod8_even));

      /* Advance to the next 8-channel weight group:
       * 32 (bias) + 9*8 (kernel) = 104 bytes. */
      w = (void*)((uintptr_t)w + 104);

      /* Requantize: scale the int32 accumulators in float. Only scale [0]
       * is used (per-tensor requantization). _mm_cvtps_epi32 converts back
       * using the current MXCSR rounding mode (round-to-nearest-even by
       * default). */
      const __m128 vmultiplier =
          _mm_set1_ps(quantization_params->sse2.requantization_scales[0]);
      vacc_lo = _mm_cvtps_epi32(
          _mm_mul_ps(
              _mm_cvtepi32_ps(vacc_lo),
              vmultiplier
              )
          );
      vacc_hi = _mm_cvtps_epi32(
          _mm_mul_ps(
              _mm_cvtepi32_ps(vacc_hi),
              vmultiplier
              )
          );

      /* Add output zero point with int16 saturation, pack to uint8 with
       * unsigned saturation, then clamp to [output_min, output_max]. */
      const __m128i voutput_zero_point = _mm_load_si128(
          (const __m128i*)quantization_params->sse2.output_zero_point);
      __m128i vout =
          _mm_adds_epi16(_mm_packs_epi32(vacc_lo, vacc_hi), voutput_zero_point);
      vout = _mm_packus_epi16(vout, vout);
      vout = _mm_min_epu8(
          vout,
          _mm_load_si128((const __m128i*)quantization_params->sse2.output_max));
      vout = _mm_max_epu8(
          vout,
          _mm_load_si128((const __m128i*)quantization_params->sse2.output_min));

      _mm_storel_epi64((__m128i*)output, vout);
      output += 8;
    }
    /* Remainder: 1..7 trailing channels. Input pointers are stepped BACK by
     * (8 - c) bytes and the 8-byte load is shifted right so exactly the c
     * valid bytes land in the low lanes (re-reading bytes already consumed
     * by the main loop instead of reading past the buffer). */
    if (c != 0) {
      const size_t i_predecrement = 8 - c;
      /* Bit shift count = 8 bits per discarded byte. */
      const __m128i vi_shift = _mm_cvtsi32_si128(8 * i_predecrement);
      i0 -= i_predecrement;
      i1 -= i_predecrement;
      i2 -= i_predecrement;
      i3 -= i_predecrement;
      i4 -= i_predecrement;
      i5 -= i_predecrement;
      i6 -= i_predecrement;
      i7 -= i_predecrement;
      i8 -= i_predecrement;

      /* Same 9-tap multiply-accumulate as the main loop, with each input
       * load right-shifted by vi_shift to align the valid bytes. */
      __m128i vacc_lo = _mm_loadu_si128((const __m128i*)w);
      __m128i vacc_hi = _mm_loadu_si128((const __m128i*)((uintptr_t)w + 16));

      const __m128i vi0 =
          _mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i0), vi_shift);
      const __m128i vxi0 =
          sub_zero_point(_mm_unpacklo_epi8(vi0, vzero), va_zero_point);
      const __m128i vk0 = _mm_loadl_epi64((const __m128i*)((uintptr_t)w + 32));
      const __m128i vxk0 =
          _mm_sub_epi16(_mm_unpacklo_epi8(vk0, vzero), vkernel_zero_point);
      const __m128i vprod0_odd = _mm_mullo_epi16(vxi0, vxk0);
      const __m128i vprod0_even = _mm_mulhi_epi16(vxi0, vxk0);
      vacc_lo =
          _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod0_odd, vprod0_even));
      vacc_hi =
          _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod0_odd, vprod0_even));

      const __m128i vi1 =
          _mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i1), vi_shift);
      const __m128i vxi1 =
          sub_zero_point(_mm_unpacklo_epi8(vi1, vzero), va_zero_point);
      const __m128i vk1 = _mm_loadl_epi64((const __m128i*)((uintptr_t)w + 40));
      const __m128i vxk1 =
          _mm_sub_epi16(_mm_unpacklo_epi8(vk1, vzero), vkernel_zero_point);
      const __m128i vprod1_odd = _mm_mullo_epi16(vxi1, vxk1);
      const __m128i vprod1_even = _mm_mulhi_epi16(vxi1, vxk1);
      vacc_lo =
          _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod1_odd, vprod1_even));
      vacc_hi =
          _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod1_odd, vprod1_even));

      const __m128i vi2 =
          _mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i2), vi_shift);
      const __m128i vxi2 =
          sub_zero_point(_mm_unpacklo_epi8(vi2, vzero), va_zero_point);
      const __m128i vk2 = _mm_loadl_epi64((const __m128i*)((uintptr_t)w + 48));
      const __m128i vxk2 =
          _mm_sub_epi16(_mm_unpacklo_epi8(vk2, vzero), vkernel_zero_point);
      const __m128i vprod2_odd = _mm_mullo_epi16(vxi2, vxk2);
      const __m128i vprod2_even = _mm_mulhi_epi16(vxi2, vxk2);
      vacc_lo =
          _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod2_odd, vprod2_even));
      vacc_hi =
          _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod2_odd, vprod2_even));

      const __m128i vi3 =
          _mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i3), vi_shift);
      const __m128i vxi3 =
          sub_zero_point(_mm_unpacklo_epi8(vi3, vzero), va_zero_point);
      const __m128i vk3 = _mm_loadl_epi64((const __m128i*)((uintptr_t)w + 56));
      const __m128i vxk3 =
          _mm_sub_epi16(_mm_unpacklo_epi8(vk3, vzero), vkernel_zero_point);
      const __m128i vprod3_odd = _mm_mullo_epi16(vxi3, vxk3);
      const __m128i vprod3_even = _mm_mulhi_epi16(vxi3, vxk3);
      vacc_lo =
          _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod3_odd, vprod3_even));
      vacc_hi =
          _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod3_odd, vprod3_even));

      const __m128i vi4 =
          _mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i4), vi_shift);
      const __m128i vxi4 =
          sub_zero_point(_mm_unpacklo_epi8(vi4, vzero), va_zero_point);
      const __m128i vk4 = _mm_loadl_epi64((const __m128i*)((uintptr_t)w + 64));
      const __m128i vxk4 =
          _mm_sub_epi16(_mm_unpacklo_epi8(vk4, vzero), vkernel_zero_point);
      const __m128i vprod4_odd = _mm_mullo_epi16(vxi4, vxk4);
      const __m128i vprod4_even = _mm_mulhi_epi16(vxi4, vxk4);
      vacc_lo =
          _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod4_odd, vprod4_even));
      vacc_hi =
          _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod4_odd, vprod4_even));

      const __m128i vi5 =
          _mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i5), vi_shift);
      const __m128i vxi5 =
          sub_zero_point(_mm_unpacklo_epi8(vi5, vzero), va_zero_point);
      const __m128i vk5 = _mm_loadl_epi64((const __m128i*)((uintptr_t)w + 72));
      const __m128i vxk5 =
          _mm_sub_epi16(_mm_unpacklo_epi8(vk5, vzero), vkernel_zero_point);
      const __m128i vprod5_odd = _mm_mullo_epi16(vxi5, vxk5);
      const __m128i vprod5_even = _mm_mulhi_epi16(vxi5, vxk5);
      vacc_lo =
          _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod5_odd, vprod5_even));
      vacc_hi =
          _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod5_odd, vprod5_even));

      const __m128i vi6 =
          _mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i6), vi_shift);
      const __m128i vxi6 =
          sub_zero_point(_mm_unpacklo_epi8(vi6, vzero), va_zero_point);
      const __m128i vk6 = _mm_loadl_epi64((const __m128i*)((uintptr_t)w + 80));
      const __m128i vxk6 =
          _mm_sub_epi16(_mm_unpacklo_epi8(vk6, vzero), vkernel_zero_point);
      const __m128i vprod6_odd = _mm_mullo_epi16(vxi6, vxk6);
      const __m128i vprod6_even = _mm_mulhi_epi16(vxi6, vxk6);
      vacc_lo =
          _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod6_odd, vprod6_even));
      vacc_hi =
          _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod6_odd, vprod6_even));

      const __m128i vi7 =
          _mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i7), vi_shift);
      const __m128i vxi7 =
          sub_zero_point(_mm_unpacklo_epi8(vi7, vzero), va_zero_point);
      const __m128i vk7 = _mm_loadl_epi64((const __m128i*)((uintptr_t)w + 88));
      const __m128i vxk7 =
          _mm_sub_epi16(_mm_unpacklo_epi8(vk7, vzero), vkernel_zero_point);
      const __m128i vprod7_odd = _mm_mullo_epi16(vxi7, vxk7);
      const __m128i vprod7_even = _mm_mulhi_epi16(vxi7, vxk7);
      vacc_lo =
          _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod7_odd, vprod7_even));
      vacc_hi =
          _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod7_odd, vprod7_even));

      const __m128i vi8 =
          _mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i8), vi_shift);
      const __m128i vxi8 =
          sub_zero_point(_mm_unpacklo_epi8(vi8, vzero), va_zero_point);
      const __m128i vk8 = _mm_loadl_epi64((const __m128i*)((uintptr_t)w + 96));
      const __m128i vxk8 =
          _mm_sub_epi16(_mm_unpacklo_epi8(vk8, vzero), vkernel_zero_point);
      const __m128i vprod8_odd = _mm_mullo_epi16(vxi8, vxk8);
      const __m128i vprod8_even = _mm_mulhi_epi16(vxi8, vxk8);
      vacc_lo =
          _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod8_odd, vprod8_even));
      vacc_hi =
          _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod8_odd, vprod8_even));

      /* Requantize and clamp exactly as in the main loop. */
      const __m128 vmultiplier =
          _mm_set1_ps(quantization_params->sse2.requantization_scales[0]);
      vacc_lo = _mm_cvtps_epi32(
          _mm_mul_ps(
              _mm_cvtepi32_ps(vacc_lo),
              vmultiplier
              )
          );
      vacc_hi = _mm_cvtps_epi32(
          _mm_mul_ps(
              _mm_cvtepi32_ps(vacc_hi),
              vmultiplier
              )
          );
      const __m128i voutput_zero_point = _mm_load_si128(
          (const __m128i*)quantization_params->sse2.output_zero_point);
      __m128i vout =
          _mm_adds_epi16(_mm_packs_epi32(vacc_lo, vacc_hi), voutput_zero_point);
      vout = _mm_packus_epi16(vout, vout);
      vout = _mm_min_epu8(
          vout,
          _mm_load_si128((const __m128i*)quantization_params->sse2.output_max));
      vout = _mm_max_epu8(
          vout,
          _mm_load_si128((const __m128i*)quantization_params->sse2.output_min));

      /* Store the low c bytes of vout in 4/2/1-byte pieces, shifting the
       * consumed bytes out of the vector between stores. */
      if (c & 4) {
        *((uint32_t*)output) = (uint32_t)_mm_cvtsi128_si32(vout);
        output += 4;
        vout = _mm_srli_epi64(vout, 32);
      }
      if (c & 2) {
        *((uint16_t*)output) = (uint16_t)_mm_extract_epi16(vout, 0);
        output += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (c & 1) {
        *((uint8_t*)output) = (uint8_t)_mm_cvtsi128_si32(vout);
        output += 1;
      }
    }
    /* Byte-offset jump to the next output pixel. */
    output = (uint8_t*)((uintptr_t)output + output_increment);
  } while (--output_width != 0);
}
| 17,062
| 41.979849
| 80
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gavgpool/mp8x7p7q-neon.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <arm_neon.h>
#include <qnnpack/q8gavgpool.h>
/*
 * Quantized (uint8) global-average-pooling micro-kernel, NEON, multi-pass
 * "mp8x7p7q" variant: processes 7 input rows per pass (m > 7 required),
 * accumulating int32 partial sums for all n channels in `buffer`, then
 * requantizes on the final pass of at most 7 rows.
 *
 * Parameters:
 *   m            - number of input rows to reduce (must be > 7).
 *   n            - number of channels (must be >= 8).
 *   input        - first input row; rows are `input_stride` bytes apart.
 *   input_stride - byte stride between consecutive input rows.
 *   zero         - pointer to a zero vector, substituted for rows past the
 *                  end in the final partial pass.
 *   buffer       - scratch int32 accumulator, one entry per (padded) channel.
 *   output       - n uint8 outputs.
 *   quantization_params - bias, scale, output zero point and clamping
 *                  bounds (project-defined union; NEON member used here).
 */
void pytorch_q8gavgpool_ukernel_mp8x7p7q__neon(
    size_t m,
    size_t n,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    int32_t* buffer,
    uint8_t* output,
    const union pytorch_qnnp_avgpool_quantization_params
        quantization_params[restrict static 1]) {
  assert(m > 7);
  assert(n >= 8);

  /* Seven row pointers for the current pass. */
  const uint8_t* i0 = input;
  const uint8_t* i1 = i0 + input_stride;
  const uint8_t* i2 = i1 + input_stride;
  const uint8_t* i3 = i2 + input_stride;
  const uint8_t* i4 = i3 + input_stride;
  const uint8_t* i5 = i4 + input_stride;
  const uint8_t* i6 = i5 + input_stride;
  /* n rounded up to a multiple of 8: the channel loops consume 8 at a time
   * and may overrun n by up to 7 elements. */
  const size_t packed_n = (n + 7) & -8;
  /* After a pass each pointer has advanced by packed_n; this re-bases it
   * 7 rows further down. */
  const size_t input_increment = 7 * input_stride - packed_n;

  /* First pass: seed the accumulator buffer with bias + sum of rows 0..6. */
  const int32x4_t vbias = vld1q_dup_s32(&quantization_params->neon.bias);
  /* note: goes up to 7 elements over bound */
  int32_t* acc = buffer;
  for (size_t k = 0; k < n; k += 8) {
    const uint8x8_t vi0 = vld1_u8(i0);
    i0 += 8;
    const uint8x8_t vi1 = vld1_u8(i1);
    i1 += 8;
    const uint8x8_t vi2 = vld1_u8(i2);
    i2 += 8;
    const uint8x8_t vi3 = vld1_u8(i3);
    i3 += 8;
    const uint8x8_t vi4 = vld1_u8(i4);
    i4 += 8;
    const uint8x8_t vi5 = vld1_u8(i5);
    i5 += 8;
    const uint8x8_t vi6 = vld1_u8(i6);
    i6 += 8;
    /* Widen and pairwise-sum the 7 rows into int16 (max 7*255 fits). */
    const int16x8_t vsum016 =
        vreinterpretq_s16_u16(vaddw_u8(vaddl_u8(vi0, vi1), vi6));
    const int16x8_t vsum23 = vreinterpretq_s16_u16(vaddl_u8(vi2, vi3));
    const int16x8_t vsum45 = vreinterpretq_s16_u16(vaddl_u8(vi4, vi5));
    int32x4_t vacc_lo = vaddw_s16(vbias, vget_low_s16(vsum23));
    int32x4_t vacc_hi = vaddw_s16(vbias, vget_high_s16(vsum23));
    vacc_lo = vaddw_s16(vacc_lo, vget_low_s16(vsum45));
    vacc_hi = vaddw_s16(vacc_hi, vget_high_s16(vsum45));
    vacc_lo = vaddw_s16(vacc_lo, vget_low_s16(vsum016));
    vacc_hi = vaddw_s16(vacc_hi, vget_high_s16(vsum016));
    vst1q_s32(acc, vacc_lo);
    acc += 4;
    vst1q_s32(acc, vacc_hi);
    acc += 4;
  }
  /* Middle passes: add 7 more rows per iteration into the buffer until at
   * most 7 rows remain. */
  for (m -= 7; m > 7; m -= 7) {
    acc = buffer;
    i0 = (const uint8_t*)((uintptr_t)i0 + input_increment);
    i1 = (const uint8_t*)((uintptr_t)i1 + input_increment);
    i2 = (const uint8_t*)((uintptr_t)i2 + input_increment);
    i3 = (const uint8_t*)((uintptr_t)i3 + input_increment);
    i4 = (const uint8_t*)((uintptr_t)i4 + input_increment);
    i5 = (const uint8_t*)((uintptr_t)i5 + input_increment);
    i6 = (const uint8_t*)((uintptr_t)i6 + input_increment);

    /* note: goes up to 7 elements over bound */
    for (size_t k = 0; k < n; k += 8) {
      const uint8x8_t vi0 = vld1_u8(i0);
      i0 += 8;
      const uint8x8_t vi1 = vld1_u8(i1);
      i1 += 8;
      const uint8x8_t vi2 = vld1_u8(i2);
      i2 += 8;
      const uint8x8_t vi3 = vld1_u8(i3);
      i3 += 8;
      const uint8x8_t vi4 = vld1_u8(i4);
      i4 += 8;
      const uint8x8_t vi5 = vld1_u8(i5);
      i5 += 8;
      const uint8x8_t vi6 = vld1_u8(i6);
      i6 += 8;
      int32x4_t vacc_lo = vld1q_s32(acc);
      int32x4_t vacc_hi = vld1q_s32(acc + 4);

      const int16x8_t vsum016 =
          vreinterpretq_s16_u16(vaddw_u8(vaddl_u8(vi0, vi1), vi6));
      const int16x8_t vsum23 = vreinterpretq_s16_u16(vaddl_u8(vi2, vi3));
      const int16x8_t vsum45 = vreinterpretq_s16_u16(vaddl_u8(vi4, vi5));

      vacc_lo = vaddw_s16(vacc_lo, vget_low_s16(vsum23));
      vacc_hi = vaddw_s16(vacc_hi, vget_high_s16(vsum23));
      vacc_lo = vaddw_s16(vacc_lo, vget_low_s16(vsum45));
      vacc_hi = vaddw_s16(vacc_hi, vget_high_s16(vsum45));
      vacc_lo = vaddw_s16(vacc_lo, vget_low_s16(vsum016));
      vacc_hi = vaddw_s16(vacc_hi, vget_high_s16(vsum016));

      vst1q_s32(acc, vacc_lo);
      acc += 4;
      vst1q_s32(acc, vacc_hi);
      acc += 4;
    }
  }

  /* Final pass: m is now in [1, 7]. Requantization constants: aarch64 uses
   * round-to-nearest conversion + saturating narrowing + explicit clamp;
   * the arm32 path clamps in float and converts via the "magic number"
   * float-to-int trick (vfmagic/vimagic fold in the output zero point). */
  const float32x4_t vscale =
      vdupq_n_f32(quantization_params->neon.scale);
#if defined(__aarch64__)
  const int16x8_t voutput_zero_point =
      vld1q_dup_s16(&quantization_params->neon.output_zero_point);
  const uint8x8_t voutput_min =
      vld1_dup_u8(&quantization_params->neon.output_min);
  const uint8x8_t voutput_max =
      vld1_dup_u8(&quantization_params->neon.output_max);
#else
  const float32x4_t vfmin = vdupq_n_f32(quantization_params->neon.vfmin);
  const float32x4_t vfmax = vdupq_n_f32(quantization_params->neon.vfmax);
  const float32x4_t vfmagic = vdupq_n_f32(quantization_params->neon.vfmagic);
  const int32x4_t vimagic = vdupq_n_s32(quantization_params->neon.vimagic);
#endif
  /* Rows beyond the m remaining valid rows read from the zero vector, so
   * they contribute nothing to the sums. */
  i0 = (const uint8_t*)((uintptr_t)i0 + input_increment);
  i1 = (const uint8_t*)((uintptr_t)i1 + input_increment);
  if (m < 2) {
    i1 = zero;
  }
  i2 = (const uint8_t*)((uintptr_t)i2 + input_increment);
  if (m <= 2) {
    i2 = zero;
  }
  i3 = (const uint8_t*)((uintptr_t)i3 + input_increment);
  if (m < 4) {
    i3 = zero;
  }
  i4 = (const uint8_t*)((uintptr_t)i4 + input_increment);
  if (m <= 4) {
    i4 = zero;
  }
  i5 = (const uint8_t*)((uintptr_t)i5 + input_increment);
  if (m < 6) {
    i5 = zero;
  }
  i6 = (const uint8_t*)((uintptr_t)i6 + input_increment);
  if (m <= 6) {
    i6 = zero;
  }

  acc = buffer;
  do {
    const uint8x8_t vi0 = vld1_u8(i0);
    i0 += 8;
    const uint8x8_t vi1 = vld1_u8(i1);
    i1 += 8;
    const uint8x8_t vi2 = vld1_u8(i2);
    i2 += 8;
    const uint8x8_t vi3 = vld1_u8(i3);
    i3 += 8;
    const uint8x8_t vi4 = vld1_u8(i4);
    i4 += 8;
    const uint8x8_t vi5 = vld1_u8(i5);
    i5 += 8;
    const uint8x8_t vi6 = vld1_u8(i6);
    i6 += 8;
    int32x4_t vacc_lo = vld1q_s32(acc);
    acc += 4;
    int32x4_t vacc_hi = vld1q_s32(acc);
    acc += 4;

    /* Add the last (up to 7) rows into the buffered partial sums. */
    const int16x8_t vsum016 =
        vreinterpretq_s16_u16(vaddw_u8(vaddl_u8(vi0, vi1), vi6));
    const int16x8_t vsum23 = vreinterpretq_s16_u16(vaddl_u8(vi2, vi3));
    const int16x8_t vsum45 = vreinterpretq_s16_u16(vaddl_u8(vi4, vi5));

    vacc_lo = vaddw_s16(vacc_lo, vget_low_s16(vsum23));
    vacc_hi = vaddw_s16(vacc_hi, vget_high_s16(vsum23));
    vacc_lo = vaddw_s16(vacc_lo, vget_low_s16(vsum45));
    vacc_hi = vaddw_s16(vacc_hi, vget_high_s16(vsum45));
    vacc_lo = vaddw_s16(vacc_lo, vget_low_s16(vsum016));
    vacc_hi = vaddw_s16(vacc_hi, vget_high_s16(vsum016));

    /* Average = accumulated sum * scale (scale encodes 1/m and the
     * input/output scale ratio — pre-computed by the caller). */
    float32x4_t vacc_lo_f = vcvtq_f32_s32(vacc_lo);
    float32x4_t vacc_hi_f = vcvtq_f32_s32(vacc_hi);

    vacc_lo_f = vmulq_f32(vacc_lo_f, vscale);
    vacc_hi_f = vmulq_f32(vacc_hi_f, vscale);

#if defined(__aarch64__)
    /* Round to nearest, add output zero point with saturation, narrow to
     * uint8 with saturation, clamp to [output_min, output_max]. */
    vacc_lo = vcvtnq_s32_f32(vacc_lo_f);
    vacc_hi = vcvtnq_s32_f32(vacc_hi_f);
    const int16x8_t vacc = vqaddq_s16(
        vqmovn_high_s32(vqmovn_s32(vacc_lo), vacc_hi), voutput_zero_point);
    uint8x8_t vout = vqmovun_s16(vacc);
    vout = vmax_u8(vout, voutput_min);
    vout = vmin_u8(vout, voutput_max);
#else
    /* arm32: clamp in float, then magic-number conversion (the subtraction
     * of vimagic also applies the output zero point). */
    vacc_lo_f = vminq_f32(vmaxq_f32(vacc_lo_f, vfmin), vfmax);
    vacc_hi_f = vminq_f32(vmaxq_f32(vacc_hi_f, vfmin), vfmax);

    vacc_lo = vsubq_s32(
        vreinterpretq_s32_f32(vaddq_f32(vacc_lo_f, vfmagic)), vimagic);
    vacc_hi = vsubq_s32(
        vreinterpretq_s32_f32(vaddq_f32(vacc_hi_f, vfmagic)), vimagic);
    const int16x8_t vacc =
        vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));
    uint8x8_t vout = vqmovun_s16(vacc);
#endif

    vst1_u8(output, vout);
    output += 8;

    n -= 8;
  } while (n >= 8);
  /* Channel tail: 1..7 remaining. Step the row pointers back by
   * (8 - n) bytes (address_increment = n - 8 is negative) and shift right —
   * vshl_u64 with a negative count is a right shift — so the n valid bytes
   * land in the low lanes without reading past the row. */
  if (n != 0) {
    const size_t address_increment = n - 8;
    i0 = (const uint8_t*)((uintptr_t)i0 + address_increment);
    i1 = (const uint8_t*)((uintptr_t)i1 + address_increment);
    i2 = (const uint8_t*)((uintptr_t)i2 + address_increment);
    i3 = (const uint8_t*)((uintptr_t)i3 + address_increment);
    i4 = (const uint8_t*)((uintptr_t)i4 + address_increment);
    i5 = (const uint8_t*)((uintptr_t)i5 + address_increment);
    i6 = (const uint8_t*)((uintptr_t)i6 + address_increment);
    const int64x1_t vshift = vmov_n_s64(8 * address_increment);

    const uint8x8_t vi0 =
        vreinterpret_u8_u64(vshl_u64(vreinterpret_u64_u8(vld1_u8(i0)), vshift));
    const uint8x8_t vi1 =
        vreinterpret_u8_u64(vshl_u64(vreinterpret_u64_u8(vld1_u8(i1)), vshift));
    const uint8x8_t vi2 =
        vreinterpret_u8_u64(vshl_u64(vreinterpret_u64_u8(vld1_u8(i2)), vshift));
    const uint8x8_t vi3 =
        vreinterpret_u8_u64(vshl_u64(vreinterpret_u64_u8(vld1_u8(i3)), vshift));
    const uint8x8_t vi4 =
        vreinterpret_u8_u64(vshl_u64(vreinterpret_u64_u8(vld1_u8(i4)), vshift));
    const uint8x8_t vi5 =
        vreinterpret_u8_u64(vshl_u64(vreinterpret_u64_u8(vld1_u8(i5)), vshift));
    const uint8x8_t vi6 =
        vreinterpret_u8_u64(vshl_u64(vreinterpret_u64_u8(vld1_u8(i6)), vshift));
    int32x4_t vacc_lo = vld1q_s32(acc);
    acc += 4;
    int32x4_t vacc_hi = vld1q_s32(acc);

    /* Same accumulate + requantize sequence as the main final-pass loop. */
    const int16x8_t vsum016 =
        vreinterpretq_s16_u16(vaddw_u8(vaddl_u8(vi0, vi1), vi6));
    const int16x8_t vsum23 = vreinterpretq_s16_u16(vaddl_u8(vi2, vi3));
    const int16x8_t vsum45 = vreinterpretq_s16_u16(vaddl_u8(vi4, vi5));

    vacc_lo = vaddw_s16(vacc_lo, vget_low_s16(vsum23));
    vacc_hi = vaddw_s16(vacc_hi, vget_high_s16(vsum23));
    vacc_lo = vaddw_s16(vacc_lo, vget_low_s16(vsum45));
    vacc_hi = vaddw_s16(vacc_hi, vget_high_s16(vsum45));
    vacc_lo = vaddw_s16(vacc_lo, vget_low_s16(vsum016));
    vacc_hi = vaddw_s16(vacc_hi, vget_high_s16(vsum016));

    float32x4_t vacc_lo_f = vcvtq_f32_s32(vacc_lo);
    float32x4_t vacc_hi_f = vcvtq_f32_s32(vacc_hi);

    vacc_lo_f = vmulq_f32(vacc_lo_f, vscale);
    vacc_hi_f = vmulq_f32(vacc_hi_f, vscale);

#if defined(__aarch64__)
    vacc_lo = vcvtnq_s32_f32(vacc_lo_f);
    vacc_hi = vcvtnq_s32_f32(vacc_hi_f);
    const int16x8_t vacc = vqaddq_s16(
        vqmovn_high_s32(vqmovn_s32(vacc_lo), vacc_hi), voutput_zero_point);
    uint8x8_t vout = vqmovun_s16(vacc);
    vout = vmax_u8(vout, voutput_min);
    vout = vmin_u8(vout, voutput_max);
#else
    vacc_lo_f = vminq_f32(vmaxq_f32(vacc_lo_f, vfmin), vfmax);
    vacc_hi_f = vminq_f32(vmaxq_f32(vacc_hi_f, vfmin), vfmax);

    vacc_lo = vsubq_s32(
        vreinterpretq_s32_f32(vaddq_f32(vacc_lo_f, vfmagic)), vimagic);
    vacc_hi = vsubq_s32(
        vreinterpretq_s32_f32(vaddq_f32(vacc_hi_f, vfmagic)), vimagic);
    const int16x8_t vacc =
        vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));
    uint8x8_t vout = vqmovun_s16(vacc);
#endif

    /* Store n bytes in 4/2/1-byte pieces; vext rotates the consumed bytes
     * out; __builtin_assume_aligned(.., 1) permits unaligned lane stores. */
    if (n & 4) {
      vst1_lane_u32(
          __builtin_assume_aligned(output, 1), vreinterpret_u32_u8(vout), 0);
      output += 4;
      vout = vext_u8(vout, vout, 4);
    }
    if (n & 2) {
      vst1_lane_u16(
          __builtin_assume_aligned(output, 1), vreinterpret_u16_u8(vout), 0);
      output += 2;
      vout = vext_u8(vout, vout, 2);
    }
    if (n & 1) {
      vst1_lane_u8(output, vout, 0);
    }
  }
}
| 10,890
| 33.907051
| 80
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gavgpool/mp8x7p7q-sse2.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <emmintrin.h>
#include <qnnpack/q8gavgpool.h>
void pytorch_q8gavgpool_ukernel_mp8x7p7q__sse2(
size_t m,
size_t n,
const uint8_t* input,
size_t input_stride,
const uint8_t* zero,
int32_t* buffer,
uint8_t* output,
const union pytorch_qnnp_avgpool_quantization_params
quantization_params[RESTRICT_STATIC 1]) {
assert(m > 7);
assert(n >= 8);
const uint8_t* i0 = input;
const uint8_t* i1 = i0 + input_stride;
const uint8_t* i2 = i1 + input_stride;
const uint8_t* i3 = i2 + input_stride;
const uint8_t* i4 = i3 + input_stride;
const uint8_t* i5 = i4 + input_stride;
const uint8_t* i6 = i5 + input_stride;
const size_t packed_n = (n + 7) & -8;
const size_t input_increment = 7 * input_stride - packed_n;
const __m128i vbias =
_mm_load_si128((const __m128i*)&quantization_params->sse2.bias);
const __m128i vzero = _mm_setzero_si128();
/* note: goes up to 7 elements over bound */
int32_t* acc = buffer;
for (size_t k = 0; k < n; k += 8) {
const __m128i vi0 = _mm_loadl_epi64((const __m128i*)i0);
i0 += 8;
const __m128i vi1 = _mm_loadl_epi64((const __m128i*)i1);
i1 += 8;
const __m128i vi2 = _mm_loadl_epi64((const __m128i*)i2);
i2 += 8;
const __m128i vi3 = _mm_loadl_epi64((const __m128i*)i3);
i3 += 8;
const __m128i vi4 = _mm_loadl_epi64((const __m128i*)i4);
i4 += 8;
const __m128i vi5 = _mm_loadl_epi64((const __m128i*)i5);
i5 += 8;
const __m128i vi6 = _mm_loadl_epi64((const __m128i*)i6);
i6 += 8;
const __m128i vxi0 = _mm_unpacklo_epi8(vi0, vzero);
const __m128i vxi1 = _mm_unpacklo_epi8(vi1, vzero);
const __m128i vxi2 = _mm_unpacklo_epi8(vi2, vzero);
const __m128i vxi3 = _mm_unpacklo_epi8(vi3, vzero);
const __m128i vxi4 = _mm_unpacklo_epi8(vi4, vzero);
const __m128i vxi5 = _mm_unpacklo_epi8(vi5, vzero);
const __m128i vxi6 = _mm_unpacklo_epi8(vi6, vzero);
__m128i vacc_lo = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vxi0, vzero));
__m128i vacc_hi = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vxi0, vzero));
vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vxi1, vzero));
vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vxi1, vzero));
vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vxi2, vzero));
vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vxi2, vzero));
vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vxi3, vzero));
vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vxi3, vzero));
vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vxi4, vzero));
vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vxi4, vzero));
vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vxi5, vzero));
vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vxi5, vzero));
vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vxi6, vzero));
vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vxi6, vzero));
_mm_store_si128((__m128i*)acc, vacc_lo);
_mm_store_si128((__m128i*)acc + 1, vacc_hi);
acc += 8;
}
for (m -= 7; m > 7; m -= 7) {
acc = buffer;
i0 = (const uint8_t*)((uintptr_t)i0 + input_increment);
i1 = (const uint8_t*)((uintptr_t)i1 + input_increment);
i2 = (const uint8_t*)((uintptr_t)i2 + input_increment);
i3 = (const uint8_t*)((uintptr_t)i3 + input_increment);
i4 = (const uint8_t*)((uintptr_t)i4 + input_increment);
i5 = (const uint8_t*)((uintptr_t)i5 + input_increment);
i6 = (const uint8_t*)((uintptr_t)i6 + input_increment);
/* note: goes up to 7 elements over bound */
for (size_t k = 0; k < n; k += 8) {
const __m128i vi0 = _mm_loadl_epi64((const __m128i*)i0);
i0 += 8;
const __m128i vi1 = _mm_loadl_epi64((const __m128i*)i1);
i1 += 8;
const __m128i vi2 = _mm_loadl_epi64((const __m128i*)i2);
i2 += 8;
const __m128i vi3 = _mm_loadl_epi64((const __m128i*)i3);
i3 += 8;
const __m128i vi4 = _mm_loadl_epi64((const __m128i*)i4);
i4 += 8;
const __m128i vi5 = _mm_loadl_epi64((const __m128i*)i5);
i5 += 8;
const __m128i vi6 = _mm_loadl_epi64((const __m128i*)i6);
i6 += 8;
__m128i vacc_lo = _mm_load_si128((const __m128i*)acc);
__m128i vacc_hi = _mm_load_si128((const __m128i*)acc + 1);
const __m128i vxi0 = _mm_unpacklo_epi8(vi0, vzero);
const __m128i vxi1 = _mm_unpacklo_epi8(vi1, vzero);
const __m128i vxi2 = _mm_unpacklo_epi8(vi2, vzero);
const __m128i vxi3 = _mm_unpacklo_epi8(vi3, vzero);
const __m128i vxi4 = _mm_unpacklo_epi8(vi4, vzero);
const __m128i vxi5 = _mm_unpacklo_epi8(vi5, vzero);
const __m128i vxi6 = _mm_unpacklo_epi8(vi6, vzero);
vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vxi0, vzero));
vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vxi0, vzero));
vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vxi1, vzero));
vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vxi1, vzero));
vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vxi2, vzero));
vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vxi2, vzero));
vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vxi3, vzero));
vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vxi3, vzero));
vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vxi4, vzero));
vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vxi4, vzero));
vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vxi5, vzero));
vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vxi5, vzero));
vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vxi6, vzero));
vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vxi6, vzero));
_mm_store_si128((__m128i*)acc, vacc_lo);
_mm_store_si128((__m128i*)acc + 1, vacc_hi);
acc += 8;
}
}
const __m128 vscale = _mm_loadu_ps(quantization_params->sse2.scale);
i0 = (const uint8_t*)((uintptr_t)i0 + input_increment);
i1 = (const uint8_t*)((uintptr_t)i1 + input_increment);
if (m < 2) {
i1 = zero;
}
i2 = (const uint8_t*)((uintptr_t)i2 + input_increment);
if (m <= 2) {
i2 = zero;
}
i3 = (const uint8_t*)((uintptr_t)i3 + input_increment);
if (m < 4) {
i3 = zero;
}
i4 = (const uint8_t*)((uintptr_t)i4 + input_increment);
if (m <= 4) {
i4 = zero;
}
i5 = (const uint8_t*)((uintptr_t)i5 + input_increment);
if (m < 6) {
i5 = zero;
}
i6 = (const uint8_t*)((uintptr_t)i6 + input_increment);
if (m <= 6) {
i6 = zero;
}
acc = buffer;
do {
const __m128i vi0 = _mm_loadl_epi64((const __m128i*)i0);
i0 += 8;
const __m128i vi1 = _mm_loadl_epi64((const __m128i*)i1);
i1 += 8;
const __m128i vi2 = _mm_loadl_epi64((const __m128i*)i2);
i2 += 8;
const __m128i vi3 = _mm_loadl_epi64((const __m128i*)i3);
i3 += 8;
const __m128i vi4 = _mm_loadl_epi64((const __m128i*)i4);
i4 += 8;
const __m128i vi5 = _mm_loadl_epi64((const __m128i*)i5);
i5 += 8;
const __m128i vi6 = _mm_loadl_epi64((const __m128i*)i6);
i6 += 8;
__m128i vacc_lo = _mm_load_si128((const __m128i*)acc);
__m128i vacc_hi = _mm_load_si128((const __m128i*)acc + 1);
acc += 8;
const __m128i vxi0 = _mm_unpacklo_epi8(vi0, vzero);
const __m128i vxi1 = _mm_unpacklo_epi8(vi1, vzero);
const __m128i vxi2 = _mm_unpacklo_epi8(vi2, vzero);
const __m128i vxi3 = _mm_unpacklo_epi8(vi3, vzero);
const __m128i vxi4 = _mm_unpacklo_epi8(vi4, vzero);
const __m128i vxi5 = _mm_unpacklo_epi8(vi5, vzero);
const __m128i vxi6 = _mm_unpacklo_epi8(vi6, vzero);
vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vxi0, vzero));
vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vxi0, vzero));
vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vxi1, vzero));
vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vxi1, vzero));
vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vxi2, vzero));
vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vxi2, vzero));
vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vxi3, vzero));
vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vxi3, vzero));
vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vxi4, vzero));
vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vxi4, vzero));
vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vxi5, vzero));
vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vxi5, vzero));
vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vxi6, vzero));
vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vxi6, vzero));
const __m128 vacc_lo_f = _mm_mul_ps(_mm_cvtepi32_ps(vacc_lo), vscale);
const __m128 vacc_hi_f = _mm_mul_ps(_mm_cvtepi32_ps(vacc_hi), vscale);
const __m128i vscaled_lo = _mm_cvtps_epi32(vacc_lo_f);
const __m128i vscaled_hi = _mm_cvtps_epi32(vacc_hi_f);
__m128i vout = _mm_packs_epi32(vscaled_lo, vscaled_hi);
vout = _mm_adds_epi16(
vout,
_mm_load_si128(
(const __m128i*)quantization_params->sse2.output_zero_point));
vout = _mm_packus_epi16(vout, vout);
vout = _mm_min_epu8(
vout,
_mm_load_si128((const __m128i*)quantization_params->sse2.output_max));
vout = _mm_max_epu8(
vout,
_mm_load_si128((const __m128i*)quantization_params->sse2.output_min));
_mm_storel_epi64((__m128i*)output, vout);
output += 8;
n -= 8;
} while (n >= 8);
if (n != 0) {
const size_t address_decrement = 8 - n;
i0 = (const uint8_t*)((uintptr_t)i0 - address_decrement);
i1 = (const uint8_t*)((uintptr_t)i1 - address_decrement);
i2 = (const uint8_t*)((uintptr_t)i2 - address_decrement);
i3 = (const uint8_t*)((uintptr_t)i3 - address_decrement);
i4 = (const uint8_t*)((uintptr_t)i4 - address_decrement);
i5 = (const uint8_t*)((uintptr_t)i5 - address_decrement);
i6 = (const uint8_t*)((uintptr_t)i6 - address_decrement);
const __m128i vi_shift = _mm_cvtsi32_si128(8 * address_decrement);
const __m128i vi0 =
_mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i0), vi_shift);
const __m128i vi1 =
_mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i1), vi_shift);
const __m128i vi2 =
_mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i2), vi_shift);
const __m128i vi3 =
_mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i3), vi_shift);
const __m128i vi4 =
_mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i4), vi_shift);
const __m128i vi5 =
_mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i5), vi_shift);
const __m128i vi6 =
_mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i6), vi_shift);
__m128i vacc_lo = _mm_load_si128((const __m128i*)acc);
__m128i vacc_hi = _mm_load_si128((const __m128i*)acc + 1);
const __m128i vxi0 = _mm_unpacklo_epi8(vi0, vzero);
const __m128i vxi1 = _mm_unpacklo_epi8(vi1, vzero);
const __m128i vxi2 = _mm_unpacklo_epi8(vi2, vzero);
const __m128i vxi3 = _mm_unpacklo_epi8(vi3, vzero);
const __m128i vxi4 = _mm_unpacklo_epi8(vi4, vzero);
const __m128i vxi5 = _mm_unpacklo_epi8(vi5, vzero);
const __m128i vxi6 = _mm_unpacklo_epi8(vi6, vzero);
vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vxi0, vzero));
vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vxi0, vzero));
vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vxi1, vzero));
vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vxi1, vzero));
vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vxi2, vzero));
vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vxi2, vzero));
vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vxi3, vzero));
vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vxi3, vzero));
vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vxi4, vzero));
vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vxi4, vzero));
vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vxi5, vzero));
vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vxi5, vzero));
vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vxi6, vzero));
vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vxi6, vzero));
const __m128 vacc_lo_f = _mm_mul_ps(_mm_cvtepi32_ps(vacc_lo), vscale);
const __m128 vacc_hi_f = _mm_mul_ps(_mm_cvtepi32_ps(vacc_hi), vscale);
const __m128i vscaled_lo = _mm_cvtps_epi32(vacc_lo_f);
const __m128i vscaled_hi = _mm_cvtps_epi32(vacc_hi_f);
__m128i vout = _mm_packs_epi32(vscaled_lo, vscaled_hi);
vout = _mm_adds_epi16(
vout,
_mm_load_si128(
(const __m128i*)quantization_params->sse2.output_zero_point));
vout = _mm_packus_epi16(vout, vout);
vout = _mm_min_epu8(
vout,
_mm_load_si128((const __m128i*)quantization_params->sse2.output_max));
vout = _mm_max_epu8(
vout,
_mm_load_si128((const __m128i*)quantization_params->sse2.output_min));
if (n & 4) {
*((uint32_t*)output) = (uint32_t)_mm_cvtsi128_si32(vout);
output += 4;
vout = _mm_srli_epi64(vout, 32);
}
if (n & 2) {
*((uint16_t*)output) = (uint16_t)_mm_extract_epi16(vout, 0);
output += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (n & 1) {
*((uint8_t*)output) = (uint8_t)_mm_cvtsi128_si32(vout);
}
}
}
| 13,544
| 40.805556
| 78
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gavgpool/up8x7-neon.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <arm_neon.h>
#include <qnnpack/q8gavgpool.h>
/*
 * uint8 global average pooling micro-kernel, NEON "up8x7" variant.
 *
 * Reduces m input rows (1 <= m <= 7) of n channels (n >= 8) to a single
 * output row, 8 channels per iteration. For each channel the m values are
 * summed into a 32-bit accumulator seeded with `bias`, then requantized:
 * multiply by `scale`, round, add the output zero point, clamp to
 * [output_min, output_max], and store as uint8.
 */
void pytorch_q8gavgpool_ukernel_up8x7__neon(
    size_t m,
    size_t n,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    uint8_t* output,
    const union pytorch_qnnp_avgpool_quantization_params
        quantization_params[restrict static 1]) {
  assert(m >= 1);
  assert(m <= 7);
  assert(n >= 8);

  /* Rows beyond the m-th alias the caller-provided `zero` buffer so the
   * main loop can read a fixed set of 7 row pointers unconditionally. */
  const uint8_t* i0 = input;
  const uint8_t* i1 = i0 + input_stride;
  if (m < 2) {
    i1 = zero;
  }
  const uint8_t* i2 = i1 + input_stride;
  if (m <= 2) {
    i2 = zero;
  }
  const uint8_t* i3 = i2 + input_stride;
  if (m < 4) {
    i3 = zero;
  }
  const uint8_t* i4 = i3 + input_stride;
  if (m <= 4) {
    i4 = zero;
  }
  const uint8_t* i5 = i4 + input_stride;
  if (m < 6) {
    i5 = zero;
  }
  const uint8_t* i6 = i5 + input_stride;
  if (m <= 6) {
    i6 = zero;
  }

  const int32x4_t vbias = vld1q_dup_s32(&quantization_params->neon.bias);
  const float32x4_t vscale = vdupq_n_f32(quantization_params->neon.scale);
#if defined(__aarch64__)
  /* AArch64: convert with round-to-nearest (vcvtnq) and clamp in the
   * integer domain after adding the output zero point. */
  const int16x8_t voutput_zero_point =
      vld1q_dup_s16(&quantization_params->neon.output_zero_point);
  const uint8x8_t voutput_min =
      vld1_dup_u8(&quantization_params->neon.output_min);
  const uint8x8_t voutput_max =
      vld1_dup_u8(&quantization_params->neon.output_max);
#else
  /* AArch32: no round-to-nearest float->int convert, so clamp in the float
   * domain and convert via the magic-number trick (add vfmagic, reinterpret
   * as int, subtract vimagic); the magic constants also fold in the output
   * zero point. */
  const float32x4_t vfmin = vdupq_n_f32(quantization_params->neon.vfmin);
  const float32x4_t vfmax = vdupq_n_f32(quantization_params->neon.vfmax);
  const float32x4_t vfmagic = vdupq_n_f32(quantization_params->neon.vfmagic);
  const int32x4_t vimagic = vdupq_n_s32(quantization_params->neon.vimagic);
#endif
  /* Main loop: process 8 channels per iteration while at least 8 remain. */
  do {
    const uint8x8_t vi0 = vld1_u8(i0);
    i0 += 8;
    const uint8x8_t vi1 = vld1_u8(i1);
    i1 += 8;
    const uint8x8_t vi2 = vld1_u8(i2);
    i2 += 8;
    const uint8x8_t vi3 = vld1_u8(i3);
    i3 += 8;
    const uint8x8_t vi4 = vld1_u8(i4);
    i4 += 8;
    const uint8x8_t vi5 = vld1_u8(i5);
    i5 += 8;
    const uint8x8_t vi6 = vld1_u8(i6);
    i6 += 8;

    /* Widen to 16 bits in pairs/triples (7 uint8 values cannot overflow
     * int16), then widen-accumulate into the 32-bit bias accumulators. */
    const int16x8_t vsum016 =
        vreinterpretq_s16_u16(vaddw_u8(vaddl_u8(vi0, vi1), vi6));
    const int16x8_t vsum23 = vreinterpretq_s16_u16(vaddl_u8(vi2, vi3));
    const int16x8_t vsum45 = vreinterpretq_s16_u16(vaddl_u8(vi4, vi5));

    int32x4_t vacc_lo = vaddw_s16(vbias, vget_low_s16(vsum23));
    int32x4_t vacc_hi = vaddw_s16(vbias, vget_high_s16(vsum23));
    vacc_lo = vaddw_s16(vacc_lo, vget_low_s16(vsum45));
    vacc_hi = vaddw_s16(vacc_hi, vget_high_s16(vsum45));
    vacc_lo = vaddw_s16(vacc_lo, vget_low_s16(vsum016));
    vacc_hi = vaddw_s16(vacc_hi, vget_high_s16(vsum016));

    /* Requantize: scale in float, then round/offset/clamp per the branch
     * above, and narrow to uint8. */
    float32x4_t vacc_lo_f = vcvtq_f32_s32(vacc_lo);
    float32x4_t vacc_hi_f = vcvtq_f32_s32(vacc_hi);

    vacc_lo_f = vmulq_f32(vacc_lo_f, vscale);
    vacc_hi_f = vmulq_f32(vacc_hi_f, vscale);

#if defined(__aarch64__)
    vacc_lo = vcvtnq_s32_f32(vacc_lo_f);
    vacc_hi = vcvtnq_s32_f32(vacc_hi_f);
    const int16x8_t vacc = vqaddq_s16(
        vqmovn_high_s32(vqmovn_s32(vacc_lo), vacc_hi), voutput_zero_point);
    uint8x8_t vout = vqmovun_s16(vacc);
    vout = vmax_u8(vout, voutput_min);
    vout = vmin_u8(vout, voutput_max);
#else
    vacc_lo_f = vminq_f32(vmaxq_f32(vacc_lo_f, vfmin), vfmax);
    vacc_hi_f = vminq_f32(vmaxq_f32(vacc_hi_f, vfmin), vfmax);

    vacc_lo = vsubq_s32(
        vreinterpretq_s32_f32(vaddq_f32(vacc_lo_f, vfmagic)), vimagic);
    vacc_hi = vsubq_s32(
        vreinterpretq_s32_f32(vaddq_f32(vacc_hi_f, vfmagic)), vimagic);
    const int16x8_t vacc =
        vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));
    uint8x8_t vout = vqmovun_s16(vacc);
#endif

    vst1_u8(output, vout);
    output += 8;

    n -= 8;
  } while (n >= 8);
  if (n != 0) {
    /* Remainder (1..7 channels): step the row pointers BACK by 8-n
     * (address_increment is n - 8, i.e. a negative offset in size_t
     * two's-complement arithmetic) and reload 8 bytes; the negative shift
     * count makes vshl_u64 shift RIGHT, discarding the 8-n low (already
     * processed) bytes so the n fresh bytes land in lanes 0..n-1. */
    const size_t address_increment = n - 8;
    i0 = (const uint8_t*)((uintptr_t)i0 + address_increment);
    i1 = (const uint8_t*)((uintptr_t)i1 + address_increment);
    i2 = (const uint8_t*)((uintptr_t)i2 + address_increment);
    i3 = (const uint8_t*)((uintptr_t)i3 + address_increment);
    i4 = (const uint8_t*)((uintptr_t)i4 + address_increment);
    i5 = (const uint8_t*)((uintptr_t)i5 + address_increment);
    i6 = (const uint8_t*)((uintptr_t)i6 + address_increment);
    const int64x1_t vshift = vmov_n_s64(8 * address_increment);

    const uint8x8_t vi0 =
        vreinterpret_u8_u64(vshl_u64(vreinterpret_u64_u8(vld1_u8(i0)), vshift));
    const uint8x8_t vi1 =
        vreinterpret_u8_u64(vshl_u64(vreinterpret_u64_u8(vld1_u8(i1)), vshift));
    const uint8x8_t vi2 =
        vreinterpret_u8_u64(vshl_u64(vreinterpret_u64_u8(vld1_u8(i2)), vshift));
    const uint8x8_t vi3 =
        vreinterpret_u8_u64(vshl_u64(vreinterpret_u64_u8(vld1_u8(i3)), vshift));
    const uint8x8_t vi4 =
        vreinterpret_u8_u64(vshl_u64(vreinterpret_u64_u8(vld1_u8(i4)), vshift));
    const uint8x8_t vi5 =
        vreinterpret_u8_u64(vshl_u64(vreinterpret_u64_u8(vld1_u8(i5)), vshift));
    const uint8x8_t vi6 =
        vreinterpret_u8_u64(vshl_u64(vreinterpret_u64_u8(vld1_u8(i6)), vshift));

    /* Same sum + requantize sequence as the main loop. */
    const int16x8_t vsum016 =
        vreinterpretq_s16_u16(vaddw_u8(vaddl_u8(vi0, vi1), vi6));
    const int16x8_t vsum23 = vreinterpretq_s16_u16(vaddl_u8(vi2, vi3));
    const int16x8_t vsum45 = vreinterpretq_s16_u16(vaddl_u8(vi4, vi5));

    int32x4_t vacc_lo = vaddw_s16(vbias, vget_low_s16(vsum23));
    int32x4_t vacc_hi = vaddw_s16(vbias, vget_high_s16(vsum23));
    vacc_lo = vaddw_s16(vacc_lo, vget_low_s16(vsum45));
    vacc_hi = vaddw_s16(vacc_hi, vget_high_s16(vsum45));
    vacc_lo = vaddw_s16(vacc_lo, vget_low_s16(vsum016));
    vacc_hi = vaddw_s16(vacc_hi, vget_high_s16(vsum016));

    float32x4_t vacc_lo_f = vcvtq_f32_s32(vacc_lo);
    float32x4_t vacc_hi_f = vcvtq_f32_s32(vacc_hi);

    vacc_lo_f = vmulq_f32(vacc_lo_f, vscale);
    vacc_hi_f = vmulq_f32(vacc_hi_f, vscale);

#if defined(__aarch64__)
    vacc_lo = vcvtnq_s32_f32(vacc_lo_f);
    vacc_hi = vcvtnq_s32_f32(vacc_hi_f);
    const int16x8_t vacc = vqaddq_s16(
        vqmovn_high_s32(vqmovn_s32(vacc_lo), vacc_hi), voutput_zero_point);
    uint8x8_t vout = vqmovun_s16(vacc);
    vout = vmax_u8(vout, voutput_min);
    vout = vmin_u8(vout, voutput_max);
#else
    vacc_lo_f = vminq_f32(vmaxq_f32(vacc_lo_f, vfmin), vfmax);
    vacc_hi_f = vminq_f32(vmaxq_f32(vacc_hi_f, vfmin), vfmax);

    vacc_lo = vsubq_s32(
        vreinterpretq_s32_f32(vaddq_f32(vacc_lo_f, vfmagic)), vimagic);
    vacc_hi = vsubq_s32(
        vreinterpretq_s32_f32(vaddq_f32(vacc_hi_f, vfmagic)), vimagic);
    const int16x8_t vacc =
        vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));
    uint8x8_t vout = vqmovun_s16(vacc);
#endif

    /* Store the n-byte tail in 4/2/1-byte pieces, rotating the vector
     * (vext) after each partial store. */
    if (n & 4) {
      vst1_lane_u32(
          __builtin_assume_aligned(output, 1), vreinterpret_u32_u8(vout), 0);
      output += 4;
      vout = vext_u8(vout, vout, 4);
    }
    if (n & 2) {
      vst1_lane_u16(
          __builtin_assume_aligned(output, 1), vreinterpret_u16_u8(vout), 0);
      output += 2;
      vout = vext_u8(vout, vout, 2);
    }
    if (n & 1) {
      vst1_lane_u8(output, vout, 0);
    }
  }
}
| 7,272
| 33.469194
| 80
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gavgpool/up8x7-sse2.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <emmintrin.h>
#include <qnnpack/q8gavgpool.h>
/*
 * uint8 global average pooling micro-kernel, SSE2 "up8x7" variant.
 *
 * Reduces m input rows (1 <= m <= 7) of n channels (n >= 8) to a single
 * output row, 8 channels per iteration. Each channel's m values are summed
 * into a 32-bit accumulator seeded with `bias`, scaled by `scale` in float,
 * rounded (cvtps uses the current rounding mode, round-to-nearest-even by
 * default), offset by the output zero point, clamped, and stored as uint8.
 */
void pytorch_q8gavgpool_ukernel_up8x7__sse2(
    size_t m,
    size_t n,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    uint8_t* output,
    const union pytorch_qnnp_avgpool_quantization_params
        quantization_params[RESTRICT_STATIC 1]) {
  assert(m >= 1);
  assert(m <= 7);
  assert(n >= 8);

  /* Rows beyond the m-th alias the caller-provided `zero` buffer so the
   * main loop can read a fixed set of 7 row pointers unconditionally. */
  const uint8_t* i0 = input;
  const uint8_t* i1 = i0 + input_stride;
  if (m < 2) {
    i1 = zero;
  }
  const uint8_t* i2 = i1 + input_stride;
  if (m <= 2) {
    i2 = zero;
  }
  const uint8_t* i3 = i2 + input_stride;
  if (m < 4) {
    i3 = zero;
  }
  const uint8_t* i4 = i3 + input_stride;
  if (m <= 4) {
    i4 = zero;
  }
  const uint8_t* i5 = i4 + input_stride;
  if (m < 6) {
    i5 = zero;
  }
  const uint8_t* i6 = i5 + input_stride;
  if (m <= 6) {
    i6 = zero;
  }

  const __m128i vbias =
      _mm_load_si128((const __m128i*)&quantization_params->sse2.bias);
  const __m128i vzero = _mm_setzero_si128();
  const __m128 vscale = _mm_loadu_ps(quantization_params->sse2.scale);

  /* Main loop: process 8 channels per iteration while at least 8 remain. */
  do {
    const __m128i vi0 = _mm_loadl_epi64((const __m128i*)i0);
    i0 += 8;
    const __m128i vi1 = _mm_loadl_epi64((const __m128i*)i1);
    i1 += 8;
    const __m128i vi2 = _mm_loadl_epi64((const __m128i*)i2);
    i2 += 8;
    const __m128i vi3 = _mm_loadl_epi64((const __m128i*)i3);
    i3 += 8;
    const __m128i vi4 = _mm_loadl_epi64((const __m128i*)i4);
    i4 += 8;
    const __m128i vi5 = _mm_loadl_epi64((const __m128i*)i5);
    i5 += 8;
    const __m128i vi6 = _mm_loadl_epi64((const __m128i*)i6);
    i6 += 8;

    /* Zero-extend each row's 8 bytes to 16-bit lanes... */
    const __m128i vxi0 = _mm_unpacklo_epi8(vi0, vzero);
    const __m128i vxi1 = _mm_unpacklo_epi8(vi1, vzero);
    const __m128i vxi2 = _mm_unpacklo_epi8(vi2, vzero);
    const __m128i vxi3 = _mm_unpacklo_epi8(vi3, vzero);
    const __m128i vxi4 = _mm_unpacklo_epi8(vi4, vzero);
    const __m128i vxi5 = _mm_unpacklo_epi8(vi5, vzero);
    const __m128i vxi6 = _mm_unpacklo_epi8(vi6, vzero);

    /* ...then to 32-bit lanes, and accumulate on top of the bias. */
    __m128i vacc_lo = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vxi0, vzero));
    __m128i vacc_hi = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vxi0, vzero));
    vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vxi1, vzero));
    vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vxi1, vzero));
    vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vxi2, vzero));
    vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vxi2, vzero));
    vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vxi3, vzero));
    vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vxi3, vzero));
    vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vxi4, vzero));
    vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vxi4, vzero));
    vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vxi5, vzero));
    vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vxi5, vzero));
    vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vxi6, vzero));
    vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vxi6, vzero));

    /* Requantize: scale in float, round back to int32, pack with signed
     * saturation to 16-bit, add the output zero point with saturation,
     * pack to uint8, and clamp to [output_min, output_max]. */
    const __m128 vacc_lo_f = _mm_mul_ps(_mm_cvtepi32_ps(vacc_lo), vscale);
    const __m128 vacc_hi_f = _mm_mul_ps(_mm_cvtepi32_ps(vacc_hi), vscale);

    const __m128i vscaled_lo = _mm_cvtps_epi32(vacc_lo_f);
    const __m128i vscaled_hi = _mm_cvtps_epi32(vacc_hi_f);

    __m128i vout = _mm_packs_epi32(vscaled_lo, vscaled_hi);
    vout = _mm_adds_epi16(
        vout,
        _mm_load_si128(
            (const __m128i*)quantization_params->sse2.output_zero_point));
    vout = _mm_packus_epi16(vout, vout);
    vout = _mm_min_epu8(
        vout,
        _mm_load_si128((const __m128i*)quantization_params->sse2.output_max));
    vout = _mm_max_epu8(
        vout,
        _mm_load_si128((const __m128i*)quantization_params->sse2.output_min));

    _mm_storel_epi64((__m128i*)output, vout);
    output += 8;

    n -= 8;
  } while (n >= 8);
  if (n != 0) {
    /* Remainder (1..7 channels): step the row pointers BACK by 8-n and
     * reload 8 bytes; the logical right shift discards the 8-n low
     * (already processed) bytes so the n fresh bytes land in lanes 0..n-1. */
    const size_t address_decrement = 8 - n;
    i0 = (const uint8_t*)((uintptr_t)i0 - address_decrement);
    i1 = (const uint8_t*)((uintptr_t)i1 - address_decrement);
    i2 = (const uint8_t*)((uintptr_t)i2 - address_decrement);
    i3 = (const uint8_t*)((uintptr_t)i3 - address_decrement);
    i4 = (const uint8_t*)((uintptr_t)i4 - address_decrement);
    i5 = (const uint8_t*)((uintptr_t)i5 - address_decrement);
    i6 = (const uint8_t*)((uintptr_t)i6 - address_decrement);
    const __m128i vi_shift = _mm_cvtsi32_si128(8 * address_decrement);

    const __m128i vi0 =
        _mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i0), vi_shift);
    const __m128i vi1 =
        _mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i1), vi_shift);
    const __m128i vi2 =
        _mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i2), vi_shift);
    const __m128i vi3 =
        _mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i3), vi_shift);
    const __m128i vi4 =
        _mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i4), vi_shift);
    const __m128i vi5 =
        _mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i5), vi_shift);
    const __m128i vi6 =
        _mm_srl_epi64(_mm_loadl_epi64((const __m128i*)i6), vi_shift);

    /* Same sum + requantize sequence as the main loop. */
    const __m128i vxi0 = _mm_unpacklo_epi8(vi0, vzero);
    const __m128i vxi1 = _mm_unpacklo_epi8(vi1, vzero);
    const __m128i vxi2 = _mm_unpacklo_epi8(vi2, vzero);
    const __m128i vxi3 = _mm_unpacklo_epi8(vi3, vzero);
    const __m128i vxi4 = _mm_unpacklo_epi8(vi4, vzero);
    const __m128i vxi5 = _mm_unpacklo_epi8(vi5, vzero);
    const __m128i vxi6 = _mm_unpacklo_epi8(vi6, vzero);

    __m128i vacc_lo = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vxi0, vzero));
    __m128i vacc_hi = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vxi0, vzero));
    vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vxi1, vzero));
    vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vxi1, vzero));
    vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vxi2, vzero));
    vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vxi2, vzero));
    vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vxi3, vzero));
    vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vxi3, vzero));
    vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vxi4, vzero));
    vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vxi4, vzero));
    vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vxi5, vzero));
    vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vxi5, vzero));
    vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vxi6, vzero));
    vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vxi6, vzero));

    const __m128 vacc_lo_f = _mm_mul_ps(_mm_cvtepi32_ps(vacc_lo), vscale);
    const __m128 vacc_hi_f = _mm_mul_ps(_mm_cvtepi32_ps(vacc_hi), vscale);

    const __m128i vscaled_lo = _mm_cvtps_epi32(vacc_lo_f);
    const __m128i vscaled_hi = _mm_cvtps_epi32(vacc_hi_f);

    __m128i vout = _mm_packs_epi32(vscaled_lo, vscaled_hi);
    vout = _mm_adds_epi16(
        vout,
        _mm_load_si128(
            (const __m128i*)quantization_params->sse2.output_zero_point));
    vout = _mm_packus_epi16(vout, vout);
    vout = _mm_min_epu8(
        vout,
        _mm_load_si128((const __m128i*)quantization_params->sse2.output_max));
    vout = _mm_max_epu8(
        vout,
        _mm_load_si128((const __m128i*)quantization_params->sse2.output_min));

    /* Store the n-byte tail in 4/2/1-byte pieces, shifting the vector down
     * after each partial store. */
    if (n & 4) {
      *((uint32_t*)output) = (uint32_t)_mm_cvtsi128_si32(vout);
      output += 4;
      vout = _mm_srli_epi64(vout, 32);
    }
    if (n & 2) {
      *((uint16_t*)output) = (uint16_t)_mm_extract_epi16(vout, 0);
      output += 2;
      vout = _mm_srli_epi32(vout, 16);
    }
    if (n & 1) {
      *((uint8_t*)output) = (uint8_t)_mm_cvtsi128_si32(vout);
    }
  }
}
| 7,851
| 37.302439
| 78
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gavgpool/up8xm-neon.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <arm_neon.h>
#include <qnnpack/q8gavgpool.h>
/*
 * uint8 global average pooling micro-kernel, NEON "up8xm" variant for
 * narrow inputs (n < 8 channels, any number m >= 1 of rows).
 *
 * Sums all m rows channel-wise into 32-bit accumulators seeded with `bias`,
 * then requantizes (scale, round, offset, clamp) and stores the n-byte
 * result. Accumulator lanes at index >= n are computed but never stored.
 */
void pytorch_q8gavgpool_ukernel_up8xm__neon(
    size_t m,
    size_t n,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    uint8_t* output,
    const union pytorch_qnnp_avgpool_quantization_params
        quantization_params[restrict static 1]) {
  assert(m >= 1);
  assert(n < 8);

  const int32x4_t vbias = vld1q_dup_s32(&quantization_params->neon.bias);
  int32x4_t vacc_lo = vbias;
  int32x4_t vacc_hi = vbias;
  /* Fast path while at least 8 rows remain: load a full 8 bytes per row
   * even though only n are valid -- presumably safe because more row data
   * follows in the buffer (TODO confirm against callers); the extra lanes'
   * sums are discarded by the n-byte store below. */
  while (m >= 8) {
    const uint8x8_t vinput = vld1_u8(input);
    input += input_stride;
    const int16x8_t vxinput = vreinterpretq_s16_u16(vmovl_u8(vinput));
    vacc_lo = vaddw_s16(vacc_lo, vget_low_s16(vxinput));
    vacc_hi = vaddw_s16(vacc_hi, vget_high_s16(vxinput));

    m--;
  }
  /* Careful path for the remaining rows: assemble the n valid bytes with
   * lane loads from the END of the row backwards (1, then 2, then 4 bytes,
   * rotating with vext between steps) so no byte past the row is read. */
  while (m-- != 0) {
    input += n;
    uint8x8_t vinput = vmov_n_u8(0);
    if (n & 1) {
      input -= 1;
      vinput = vld1_lane_u8(input, vinput, 0);
    }
    if (n & 2) {
      vinput = vext_u8(vinput, vinput, 6);
      input -= 2;
      vinput = vreinterpret_u8_u16(vld1_lane_u16(
          __builtin_assume_aligned(input, 1), vreinterpret_u16_u8(vinput), 0));
    }
    if (n & 4) {
      vinput = vext_u8(vinput, vinput, 4);
      input -= 4;
      vinput = vreinterpret_u8_u32(vld1_lane_u32(
          __builtin_assume_aligned(input, 1), vreinterpret_u32_u8(vinput), 0));
    }
    input += input_stride;

    const int16x8_t vxinput = vreinterpretq_s16_u16(vmovl_u8(vinput));
    vacc_lo = vaddw_s16(vacc_lo, vget_low_s16(vxinput));
    vacc_hi = vaddw_s16(vacc_hi, vget_high_s16(vxinput));
  }

  /* Requantize: scale in float, then round/offset/clamp. On AArch64 use
   * round-to-nearest convert plus integer clamping; on AArch32 clamp in
   * float and convert via the magic-number trick (the magic constants fold
   * in the output zero point, so voutput_zero_point is unused there). */
  const float32x4_t vscale =
      vdupq_n_f32(quantization_params->neon.scale);
  const int16x8_t voutput_zero_point =
      vld1q_dup_s16(&quantization_params->neon.output_zero_point);

  float32x4_t vacc_lo_f = vcvtq_f32_s32(vacc_lo);
  float32x4_t vacc_hi_f = vcvtq_f32_s32(vacc_hi);

  vacc_lo_f = vmulq_f32(vacc_lo_f, vscale);
  vacc_hi_f = vmulq_f32(vacc_hi_f, vscale);

#if defined(__aarch64__)
  const uint8x8_t voutput_min =
      vld1_dup_u8(&quantization_params->neon.output_min);
  const uint8x8_t voutput_max =
      vld1_dup_u8(&quantization_params->neon.output_max);

  vacc_lo = vcvtnq_s32_f32(vacc_lo_f);
  vacc_hi = vcvtnq_s32_f32(vacc_hi_f);
  const int16x8_t vacc = vqaddq_s16(
      vqmovn_high_s32(vqmovn_s32(vacc_lo), vacc_hi), voutput_zero_point);
  uint8x8_t vout = vqmovun_s16(vacc);
  vout = vmax_u8(vout, voutput_min);
  vout = vmin_u8(vout, voutput_max);
#else
  const float32x4_t vfmin = vdupq_n_f32(quantization_params->neon.vfmin);
  const float32x4_t vfmax = vdupq_n_f32(quantization_params->neon.vfmax);
  const float32x4_t vfmagic = vdupq_n_f32(quantization_params->neon.vfmagic);
  const int32x4_t vimagic = vdupq_n_s32(quantization_params->neon.vimagic);

  vacc_lo_f = vminq_f32(vmaxq_f32(vacc_lo_f, vfmin), vfmax);
  vacc_hi_f = vminq_f32(vmaxq_f32(vacc_hi_f, vfmin), vfmax);

  vacc_lo = vsubq_s32(
      vreinterpretq_s32_f32(vaddq_f32(vacc_lo_f, vfmagic)), vimagic);
  vacc_hi = vsubq_s32(
      vreinterpretq_s32_f32(vaddq_f32(vacc_hi_f, vfmagic)), vimagic);
  const int16x8_t vacc =
      vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));
  uint8x8_t vout = vqmovun_s16(vacc);
#endif

  /* Store the n-byte result in 4/2/1-byte pieces. */
  if (n & 4) {
    vst1_lane_u32(
        __builtin_assume_aligned(output, 1), vreinterpret_u32_u8(vout), 0);
    output += 4;
    vout = vext_u8(vout, vout, 4);
  }
  if (n & 2) {
    vst1_lane_u16(
        __builtin_assume_aligned(output, 1), vreinterpret_u16_u8(vout), 0);
    output += 2;
    vout = vext_u8(vout, vout, 2);
  }
  if (n & 1) {
    vst1_lane_u8(output, vout, 0);
  }
}
| 3,865
| 30.430894
| 79
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gavgpool/up8xm-sse2.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <emmintrin.h>
#include <qnnpack/q8gavgpool.h>
/*
 * uint8 global average pooling micro-kernel, SSE2 "up8xm" variant for
 * narrow inputs (n < 8 channels, any number m >= 1 of rows).
 *
 * Sums all m rows channel-wise into 32-bit accumulators seeded with `bias`,
 * then requantizes (scale, round, offset, clamp) and stores the n-byte
 * result. Accumulator lanes at index >= n are computed but never stored.
 */
void pytorch_q8gavgpool_ukernel_up8xm__sse2(
    size_t m,
    size_t n,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    uint8_t* output,
    const union pytorch_qnnp_avgpool_quantization_params
        quantization_params[RESTRICT_STATIC 1]) {
  assert(m >= 1);
  assert(n < 8);

  const __m128i vbias =
      _mm_loadu_si128((const __m128i*)&quantization_params->sse2.bias);
  __m128i vacc_lo = vbias;
  __m128i vacc_hi = vbias;
  __m128i vzero = _mm_setzero_si128();
  /* Fast path while at least 8 rows remain: load a full 8 bytes per row
   * even though only n are valid -- presumably safe because more row data
   * follows in the buffer (TODO confirm against callers); the extra lanes'
   * sums are discarded by the n-byte store below.
   * Note: unpacklo/hi_epi8 on vxinput (already zero-extended 16-bit lanes)
   * interleaves with zero bytes, which is exactly the 16->32-bit zero
   * extension that unpacklo/hi_epi16 would produce. */
  while (m >= 8) {
    const __m128i vinput = _mm_loadl_epi64((const __m128i*)input);
    const __m128i vxinput = _mm_unpacklo_epi8(vinput, vzero);
    vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi8(vxinput, vzero));
    vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi8(vxinput, vzero));

    input += input_stride;
    m--;
  }
  /* Careful path for the remaining rows: assemble the n valid bytes from
   * the END of the row backwards (1, then 2, then 4 bytes) so no byte past
   * the row is read. */
  while (m-- != 0) {
    input += n;
    __m128i vinput = _mm_setzero_si128();
    if (n & 1) {
      input -= 1;
      vinput = _mm_cvtsi32_si128((int)(uint32_t)*input);
    }
    if (n & 2) {
      vinput = _mm_slli_epi32(vinput, 16);
      input -= 2;
      vinput = _mm_insert_epi16(vinput, *((const uint16_t*)input), 0);
    }
    if (n & 4) {
      input -= 4;
      vinput = _mm_unpacklo_epi32(
          _mm_cvtsi32_si128((int)*((const uint32_t*)input)), vinput);
    }
    input += input_stride;

    const __m128i vxinput = _mm_unpacklo_epi8(vinput, vzero);
    vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi8(vxinput, vzero));
    vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi8(vxinput, vzero));
  }

  /* Requantize: scale in float, round back to int32 (cvtps), pack with
   * signed saturation, add the output zero point, pack to uint8, clamp. */
  const __m128 vscale = _mm_loadu_ps(quantization_params->sse2.scale);

  const __m128 vacc_lo_f = _mm_mul_ps(_mm_cvtepi32_ps(vacc_lo), vscale);
  const __m128 vacc_hi_f = _mm_mul_ps(_mm_cvtepi32_ps(vacc_hi), vscale);

  const __m128i vscaled_lo = _mm_cvtps_epi32(vacc_lo_f);
  const __m128i vscaled_hi = _mm_cvtps_epi32(vacc_hi_f);

  __m128i vout = _mm_packs_epi32(vscaled_lo, vscaled_hi);
  vout = _mm_adds_epi16(
      vout,
      _mm_load_si128(
          (const __m128i*)quantization_params->sse2.output_zero_point));
  vout = _mm_packus_epi16(vout, vout);
  vout = _mm_min_epu8(
      vout,
      _mm_load_si128((const __m128i*)quantization_params->sse2.output_max));
  vout = _mm_max_epu8(
      vout,
      _mm_load_si128((const __m128i*)quantization_params->sse2.output_min));

  /* Store the n-byte result in 4/2/1-byte pieces, shifting the vector down
   * after each partial store. */
  if (n & 4) {
    *((uint32_t*)output) = (uint32_t)_mm_cvtsi128_si32(vout);
    output += 4;
    vout = _mm_srli_epi64(vout, 32);
  }
  if (n & 2) {
    *((uint16_t*)output) = (uint16_t)_mm_extract_epi16(vout, 0);
    output += 2;
    vout = _mm_srli_epi32(vout, 16);
  }
  if (n & 1) {
    *((uint8_t*)output) = (uint8_t)_mm_cvtsi128_si32(vout);
  }
}
| 2,995
| 28.96
| 76
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm/2x4c8-sse2.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <immintrin.h>
#include <qnnpack/q8gemm.h>
#include <requantization/runtime-sse2.h>
/*
 * Horizontally sums each of four int32x4 vectors and packs the four sums
 * into one vector: lane 0 = sum(x), lane 1 = sum(y), lane 2 = sum(z),
 * lane 3 = sum(w).
 */
static inline __m128i pytorch_sse_reduce4_i32(
    __m128i x,
    __m128i y,
    __m128i z,
    __m128i w) {
#if defined(__SSSE3__) && !defined(__ANDROID__)
  /* SSSE3: two rounds of pairwise horizontal adds collapse 16 lanes to 4.
   * sum_xy = ( y2+y3, y0+y1, x2+x3, x0+x1 ), sum_zw likewise for z and w. */
  const __m128i sum_xy = _mm_hadd_epi32(x, y);
  const __m128i sum_zw = _mm_hadd_epi32(z, w);
  /* Final round yields ( sum(w), sum(z), sum(y), sum(x) ) high-to-low. */
  return _mm_hadd_epi32(sum_xy, sum_zw);
#else
  /* SSE2-only fallback: interleave-and-add performs the same reduction
   * without SSSE3's hadd. First halve each input pairwise... */
  const __m128i lo_xz = _mm_unpacklo_epi32(x, z);
  const __m128i hi_xz = _mm_unpackhi_epi32(x, z);
  const __m128i lo_yw = _mm_unpacklo_epi32(y, w);
  const __m128i hi_yw = _mm_unpackhi_epi32(y, w);
  /* part_xz = ( z1+z3, x1+x3, z0+z2, x0+x2 ), part_yw likewise. */
  const __m128i part_xz = _mm_add_epi32(lo_xz, hi_xz);
  const __m128i part_yw = _mm_add_epi32(lo_yw, hi_yw);
  /* ...then interleave and add once more to finish all four sums. */
  return _mm_add_epi32(
      _mm_unpacklo_epi32(part_xz, part_yw),
      _mm_unpackhi_epi32(part_xz, part_yw));
#endif
}
/*
 * Quantized uint8 GEMM micro-kernel, SSE2 "2x4c8" tile: up to 2 rows (mr)
 * by up to 4 columns (nr) of output, reducing k 8 elements at a time.
 *
 * `w` holds 4 int32 bias values followed by the packed B panel (nr
 * interleaved rows of 8 bytes each per k-step). Both A and B are
 * zero-point-adjusted before the 16-bit multiply-accumulate
 * (_mm_madd_epi16); each of the 8 accumulators holds 4 partial sums that
 * are horizontally reduced at the end, requantized with a per-channel
 * float multiplier, offset by the output zero point, and clamped.
 */
void pytorch_q8gemm_ukernel_2x4c8__sse2(
    size_t mr,
    size_t nr,
    size_t k,
    const uint8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    uint8_t* restrict c,
    size_t c_stride,
    size_t output_channel_index,
    const union pytorch_qnnp_conv_quantization_params
        quantization_params[RESTRICT_STATIC 1]) {
  /* Seed each column's accumulator with its bias; row 1 starts from the
   * same bias values as row 0. */
  __m128i vacc00 = _mm_cvtsi32_si128((int)((const int32_t*)w)[0]);
  __m128i vacc01 = _mm_cvtsi32_si128((int)((const int32_t*)w)[1]);
  __m128i vacc02 = _mm_cvtsi32_si128((int)((const int32_t*)w)[2]);
  __m128i vacc03 = _mm_cvtsi32_si128((int)((const int32_t*)w)[3]);
  __m128i vacc10 = vacc00;
  __m128i vacc11 = vacc01;
  __m128i vacc12 = vacc02;
  __m128i vacc13 = vacc03;
  w = (const void*)((uintptr_t)w + 16);

  /* Out-of-range rows/columns alias the previous valid pointer so loads
   * stay in bounds; their results are simply not stored. */
  const uint8_t* a0 = a;
  const uint8_t* a1 = (const uint8_t*)((uintptr_t)a0 + a_stride);
  if (mr != 2) {
    a1 = a0;
  }

  const uint8_t* b0 = w;
  const uint8_t* b1 = b0 + 8;
  if (nr < 2) {
    b1 = b0;
  }
  const uint8_t* b2 = b1 + 8;
  if (nr <= 2) {
    b2 = b1;
  }
  const uint8_t* b3 = b2 + 8;
  if (nr != 4) {
    b3 = b2;
  }
  const size_t b_stride = nr * 8;

  const __m128i va_zero_point = _mm_load_si128(
      (const __m128i*)quantization_params->sse2.input_zero_point);
  /* Per-output-channel kernel zero points. */
  const __m128i vb_zero_point_0 = _mm_set1_epi16(
      (int16_t)(uint16_t)quantization_params->sse2.kernel_zero_points[
          output_channel_index]);
  // Assumes kernel_zero_point allocated memory is always multiple of nr=4.
  const __m128i vb_zero_point_1 = _mm_set1_epi16(
      (int16_t)(uint16_t)quantization_params->sse2.kernel_zero_points[
          output_channel_index + 1]);
  const __m128i vb_zero_point_2 = _mm_set1_epi16(
      (int16_t)(uint16_t)quantization_params->sse2.kernel_zero_points[
          output_channel_index + 2]);
  const __m128i vb_zero_point_3 = _mm_set1_epi16(
      (int16_t)(uint16_t)quantization_params->sse2.kernel_zero_points[
          output_channel_index + 3]);
  const __m128i vzero = _mm_setzero_si128();
  /* Main loop: consume 8 elements of k per iteration. Each madd produces
   * 4 int32 lanes of pairwise products; lanes are reduced after the loop. */
  for (; k >= 8; k -= 8) {
    const __m128i va0 = _mm_loadl_epi64((const __m128i*)a0);
    const __m128i vxa0 =
        sub_zero_point(_mm_unpacklo_epi8(va0, vzero), va_zero_point);
    a0 += 8;
    const __m128i va1 = _mm_loadl_epi64((const __m128i*)a1);
    const __m128i vxa1 =
        sub_zero_point(_mm_unpacklo_epi8(va1, vzero), va_zero_point);
    a1 += 8;

    const __m128i vb0 = _mm_loadl_epi64((const __m128i*)b0);
    const __m128i vxb0 =
        _mm_sub_epi16(_mm_unpacklo_epi8(vb0, vzero), vb_zero_point_0);
    b0 += b_stride;
    const __m128i vb1 = _mm_loadl_epi64((const __m128i*)b1);
    const __m128i vxb1 =
        _mm_sub_epi16(_mm_unpacklo_epi8(vb1, vzero), vb_zero_point_1);
    b1 += b_stride;
    const __m128i vb2 = _mm_loadl_epi64((const __m128i*)b2);
    const __m128i vxb2 =
        _mm_sub_epi16(_mm_unpacklo_epi8(vb2, vzero), vb_zero_point_2);
    b2 += b_stride;
    const __m128i vb3 = _mm_loadl_epi64((const __m128i*)b3);
    const __m128i vxb3 =
        _mm_sub_epi16(_mm_unpacklo_epi8(vb3, vzero), vb_zero_point_3);
    b3 += b_stride;

    vacc00 = _mm_add_epi32(vacc00, _mm_madd_epi16(vxa0, vxb0));
    vacc01 = _mm_add_epi32(vacc01, _mm_madd_epi16(vxa0, vxb1));
    vacc02 = _mm_add_epi32(vacc02, _mm_madd_epi16(vxa0, vxb2));
    vacc03 = _mm_add_epi32(vacc03, _mm_madd_epi16(vxa0, vxb3));
    vacc10 = _mm_add_epi32(vacc10, _mm_madd_epi16(vxa1, vxb0));
    vacc11 = _mm_add_epi32(vacc11, _mm_madd_epi16(vxa1, vxb1));
    vacc12 = _mm_add_epi32(vacc12, _mm_madd_epi16(vxa1, vxb2));
    vacc13 = _mm_add_epi32(vacc13, _mm_madd_epi16(vxa1, vxb3));
  }
  if (k != 0) {
    /* Remainder (k % 8): rewind A by 8-k and shift the stale low bytes out
     * of the load. The zero point is packed to uint8 and given the SAME
     * shift, so lanes corresponding to shifted-in zero input bytes get a
     * zero zero-point and contribute nothing after subtraction. */
    const size_t a_predecrement = 8 - k;
    const __m128i va_shift = _mm_cvtsi32_si128(8 * a_predecrement);

    const __m128i va_zero_point_partial = _mm_unpacklo_epi8(
        _mm_srl_epi64(_mm_packus_epi16(va_zero_point, va_zero_point), va_shift),
        vzero);

    const __m128i va0 = _mm_srl_epi64(
        _mm_loadl_epi64((const __m128i*)(a0 - a_predecrement)), va_shift);
    const __m128i vxa0 =
        sub_zero_point(_mm_unpacklo_epi8(va0, vzero), va_zero_point_partial);
    const __m128i va1 = _mm_srl_epi64(
        _mm_loadl_epi64((const __m128i*)(a1 - a_predecrement)), va_shift);
    const __m128i vxa1 =
        sub_zero_point(_mm_unpacklo_epi8(va1, vzero), va_zero_point_partial);

    const __m128i vb0 = _mm_loadl_epi64((const __m128i*)b0);
    const __m128i vxb0 =
        _mm_sub_epi16(_mm_unpacklo_epi8(vb0, vzero), vb_zero_point_0);
    const __m128i vb1 = _mm_loadl_epi64((const __m128i*)b1);
    const __m128i vxb1 =
        _mm_sub_epi16(_mm_unpacklo_epi8(vb1, vzero), vb_zero_point_1);
    const __m128i vb2 = _mm_loadl_epi64((const __m128i*)b2);
    const __m128i vxb2 =
        _mm_sub_epi16(_mm_unpacklo_epi8(vb2, vzero), vb_zero_point_2);
    const __m128i vb3 = _mm_loadl_epi64((const __m128i*)b3);
    const __m128i vxb3 =
        _mm_sub_epi16(_mm_unpacklo_epi8(vb3, vzero), vb_zero_point_3);

    vacc00 = _mm_add_epi32(vacc00, _mm_madd_epi16(vxa0, vxb0));
    vacc01 = _mm_add_epi32(vacc01, _mm_madd_epi16(vxa0, vxb1));
    vacc02 = _mm_add_epi32(vacc02, _mm_madd_epi16(vxa0, vxb2));
    vacc03 = _mm_add_epi32(vacc03, _mm_madd_epi16(vxa0, vxb3));
    vacc10 = _mm_add_epi32(vacc10, _mm_madd_epi16(vxa1, vxb0));
    vacc11 = _mm_add_epi32(vacc11, _mm_madd_epi16(vxa1, vxb1));
    vacc12 = _mm_add_epi32(vacc12, _mm_madd_epi16(vxa1, vxb2));
    vacc13 = _mm_add_epi32(vacc13, _mm_madd_epi16(vxa1, vxb3));
  }

  /* Horizontally reduce the 4 partial sums of each accumulator; lane i of
   * vaccRx0123 is then row R, column i of the output tile. */
  __m128i vacc0x0123 = pytorch_sse_reduce4_i32(vacc00, vacc01, vacc02, vacc03);
  __m128i vacc1x0123 = pytorch_sse_reduce4_i32(vacc10, vacc11, vacc12, vacc13);

  /* Requantize with per-channel float multipliers (cvtps rounds using the
   * current rounding mode, round-to-nearest-even by default). */
  const __m128 vmultiplier =
      _mm_loadu_ps(&quantization_params->sse2.requantization_scales
          [output_channel_index]);

  vacc0x0123 = _mm_cvtps_epi32(
                _mm_mul_ps(
                  _mm_cvtepi32_ps(vacc0x0123),
                  vmultiplier
                  )
                );
  vacc1x0123 = _mm_cvtps_epi32(
                _mm_mul_ps(
                  _mm_cvtepi32_ps(vacc1x0123),
                  vmultiplier
                  )
                );

  /* Pack both rows to 16-bit with saturation, add the output zero point,
   * pack to uint8, and clamp: byte lanes 0-3 are row 0, lanes 4-7 row 1. */
  const __m128i voutput_zero_point = _mm_load_si128(
      (const __m128i*)quantization_params->sse2.output_zero_point);
  const __m128i vacc01x0123 = _mm_adds_epi16(
      _mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
  __m128i vout = _mm_packus_epi16(vacc01x0123, vacc01x0123);
  vout = _mm_min_epu8(
      vout,
      _mm_load_si128((const __m128i*)quantization_params->sse2.output_max));
  vout = _mm_max_epu8(
      vout,
      _mm_load_si128((const __m128i*)quantization_params->sse2.output_min));

  uint8_t* c0 = c;
  uint8_t* c1 = (uint8_t*)((uintptr_t)c0 + c_stride);
  if (mr != 2) {
    c1 = c0;
  }
  /* Store: full 4-byte rows when nr == 4, otherwise 2- and 1-byte pieces
   * (row 1's bytes sit 32 bits above row 0's in vout). */
  if (nr == 4) {
    *((uint32_t*)c0) = (uint32_t)_mm_cvtsi128_si32(vout);
    *((uint32_t*)c1) = (uint32_t)_mm_cvtsi128_si32(_mm_srli_epi64(vout, 32));
  } else {
    if (nr >= 2) {
      *((uint16_t*)c0) = (uint16_t)_mm_extract_epi16(vout, 0);
      c0 += 2;
      *((uint16_t*)c1) = (uint16_t)_mm_extract_epi16(vout, 2);
      c1 += 2;
      vout = _mm_srli_epi32(vout, 16);
      nr -= 2;
    }
    if (nr != 0) {
      *((uint8_t*)c0) = (uint8_t)_mm_cvtsi128_si32(vout);
      *((uint8_t*)c1) = (uint8_t)_mm_extract_epi16(vout, 2);
    }
  }
}
| 8,597
| 36.220779
| 80
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm/4x-sumrows-neon.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <arm_neon.h>
#include <qnnpack/q8gemm.h>
/*
 * Computes, for up to 4 rows of the m-by-k uint8 matrix `a` (consecutive
 * rows are `stride` bytes apart), the sum of all k elements in each row,
 * multiplies each row sum by `multiplier`, and stores the m resulting
 * int32 values to `a_sum`.
 *
 * NOTE(review): the row sums scaled by a (typically negative) multiplier
 * look like the precomputed input-zero-point correction term for a
 * quantized GEMM -- confirm against the callers that pass `multiplier`.
 *
 * Preconditions (implied by the code): 1 <= m <= 4; k may be any value
 * including 0 (all loops/tails are guarded).
 */
void pytorch_q8sumrows_ukernel_4x__neon(
    const uint8_t* restrict a,
    size_t m,
    size_t k,
    size_t stride,
    const int32_t multiplier,
    int32_t* restrict a_sum) {
  /*
   * Set up one read pointer per row. Rows beyond m alias the previous
   * valid row, so the kernel always reads 4 rows without branching in
   * the inner loop; the duplicated sums are simply not stored at the end.
   */
  const uint8_t* a0 = a;
  const uint8_t* a1 = a0;
  if (m >= 2) {
    a1 += stride;
  }
  const uint8_t* a2 = a1;
  if (m > 2) {
    a2 += stride;
  }
  const uint8_t* a3 = a2;
  if (m == 4) {
    a3 += stride;
  }
  /* Four partial sums per row (reduced to a single scalar at the end). */
  uint32x4_t vacc0x0123 = vmovq_n_u32(0); // row 0
  uint32x4_t vacc1x0123 = vmovq_n_u32(0); // row 1
  uint32x4_t vacc2x0123 = vmovq_n_u32(0); // row 2
  uint32x4_t vacc3x0123 = vmovq_n_u32(0); // row 3
  /*
   * Main loop: 16 bytes per row per iteration. vaddl_u8 widens and adds
   * the two 8-byte halves (u8 -> u16), then vpadalq_u16 pairwise-adds
   * into the u32 accumulators. No overflow: 2 u8 values fit in u16, and
   * the u32 lanes can absorb any realistic k.
   */
  for (; k >= 16; k -= 16) {
    // row 0
    const uint8x16_t va0x0_15 = vld1q_u8(a0);
    a0 += 16;
    vacc0x0123 = vpadalq_u16(
        vacc0x0123, vaddl_u8(vget_low_u8(va0x0_15), vget_high_u8(va0x0_15)));
    // row 1
    const uint8x16_t va1x0_15 = vld1q_u8(a1);
    a1 += 16;
    vacc1x0123 = vpadalq_u16(
        vacc1x0123, vaddl_u8(vget_low_u8(va1x0_15), vget_high_u8(va1x0_15)));
    // row 2
    const uint8x16_t va2x0_15 = vld1q_u8(a2);
    a2 += 16;
    vacc2x0123 = vpadalq_u16(
        vacc2x0123, vaddl_u8(vget_low_u8(va2x0_15), vget_high_u8(va2x0_15)));
    // row 3
    const uint8x16_t va3x0_15 = vld1q_u8(a3);
    a3 += 16;
    vacc3x0123 = vpadalq_u16(
        vacc3x0123, vaddl_u8(vget_low_u8(va3x0_15), vget_high_u8(va3x0_15)));
  }
  /* Tail: 8 bytes per row (pairwise u8 add -> 4 u16, widened into u32). */
  if (k >= 8) {
    vacc0x0123 = vaddw_u16(vacc0x0123, vpaddl_u8(vld1_u8(a0)));
    a0 += 8;
    vacc1x0123 = vaddw_u16(vacc1x0123, vpaddl_u8(vld1_u8(a1)));
    a1 += 8;
    vacc2x0123 = vaddw_u16(vacc2x0123, vpaddl_u8(vld1_u8(a2)));
    a2 += 8;
    vacc3x0123 = vaddw_u16(vacc3x0123, vpaddl_u8(vld1_u8(a3)));
    a3 += 8;
    k -= 8;
  }
  /*
   * Tail: 4 bytes per row. The __builtin_assume_aligned(..., 1) casts mark
   * the u32 loads as potentially unaligned (the rows are byte-aligned).
   */
  if (k >= 4) {
    vacc0x0123 = vaddw_u16(
        vacc0x0123,
        vget_low_u16(vmovl_u8(vreinterpret_u8_u32(
            vld1_dup_u32(__builtin_assume_aligned((const uint32_t*)a0, 1))))));
    a0 += 4;
    vacc1x0123 = vaddw_u16(
        vacc1x0123,
        vget_low_u16(vmovl_u8(vreinterpret_u8_u32(
            vld1_dup_u32(__builtin_assume_aligned((const uint32_t*)a1, 1))))));
    a1 += 4;
    vacc2x0123 = vaddw_u16(
        vacc2x0123,
        vget_low_u16(vmovl_u8(vreinterpret_u8_u32(
            vld1_dup_u32(__builtin_assume_aligned((const uint32_t*)a2, 1))))));
    a2 += 4;
    vacc3x0123 = vaddw_u16(
        vacc3x0123,
        vget_low_u16(vmovl_u8(vreinterpret_u8_u32(
            vld1_dup_u32(__builtin_assume_aligned((const uint32_t*)a3, 1))))));
    a3 += 4;
    k -= 4;
  }
  /*
   * Horizontal reduction: collapse each row's 4 partial sums into one
   * lane of vacc0123 (lane i = sum of row i so far). The remaining <=3
   * elements are folded directly into these lanes below.
   */
  const uint32x2_t vsum0x01 =
      vpadd_u32(vget_low_u32(vacc0x0123), vget_high_u32(vacc0x0123));
  const uint32x2_t vsum1x01 =
      vpadd_u32(vget_low_u32(vacc1x0123), vget_high_u32(vacc1x0123));
  const uint32x2_t vsum2x01 =
      vpadd_u32(vget_low_u32(vacc2x0123), vget_high_u32(vacc2x0123));
  const uint32x2_t vsum3x01 =
      vpadd_u32(vget_low_u32(vacc3x0123), vget_high_u32(vacc3x0123));
  uint32x4_t vacc0123 = vcombine_u32(
      vpadd_u32(vsum0x01, vsum1x01), vpadd_u32(vsum2x01, vsum3x01));
  /*
   * Tail: 2 bytes per row. vext shuffles the four 2-byte pairs into one
   * vector ordered row0,row1,row2,row3 so a single pairwise-add-widen
   * lands each pair sum in the matching lane of vacc0123.
   */
  if (k >= 2) {
    const uint8x8_t va0x01010101 = vreinterpret_u8_u16(
        vld1_dup_u16(__builtin_assume_aligned((const uint16_t*)a0, 1)));
    a0 += 2;
    const uint8x8_t va1x01010101 = vreinterpret_u8_u16(
        vld1_dup_u16(__builtin_assume_aligned((const uint16_t*)a1, 1)));
    a1 += 2;
    const uint8x8_t va2x01010101 = vreinterpret_u8_u16(
        vld1_dup_u16(__builtin_assume_aligned((const uint16_t*)a2, 1)));
    a2 += 2;
    const uint8x8_t va3x01010101 = vreinterpret_u8_u16(
        vld1_dup_u16(__builtin_assume_aligned((const uint16_t*)a3, 1)));
    a3 += 2;
    const uint8x8_t va0x01_1x010101 = vext_u8(va0x01010101, va1x01010101, 2);
    const uint8x8_t va2x01_3x010101 = vext_u8(va2x01010101, va3x01010101, 6);
    const uint8x8_t va0123x01 = vext_u8(va0x01_1x010101, va2x01_3x010101, 4);
    vacc0123 = vaddw_u16(vacc0123, vpaddl_u8(va0123x01));
    k -= 2;
  }
  /*
   * Tail: final single byte per row, loaded into even byte lanes so the
   * pairwise add (with the zeroed odd lanes) routes each byte to the
   * correct row lane.
   */
  if (k > 0) {
    uint8x8_t vax0x1x2x3 = vmov_n_u8(0);
    vax0x1x2x3 = vld1_lane_u8(a0, vax0x1x2x3, 0);
    vax0x1x2x3 = vld1_lane_u8(a1, vax0x1x2x3, 2);
    vax0x1x2x3 = vld1_lane_u8(a2, vax0x1x2x3, 4);
    vax0x1x2x3 = vld1_lane_u8(a3, vax0x1x2x3, 6);
    vacc0123 = vaddw_u16(vacc0123, vpaddl_u8(vax0x1x2x3));
  }
  /* Scale the four row sums; then store only the m valid results. */
  int32x4_t vsum0123 = vmulq_n_s32(vreinterpretq_s32_u32(vacc0123), multiplier);
  if (m == 4) {
    vst1q_s32(a_sum, vsum0123);
  } else {
    if (m >= 2) {
      vst1_s32(a_sum, vget_low_s32(vsum0123));
      a_sum += 2;
      vsum0123 = vextq_s32(vsum0123, vsum0123, 2);
      m -= 2;
    }
    if (m != 0) {
      vst1q_lane_s32(a_sum, vsum0123, 0);
    }
  }
}
| 4,862
| 30.374194
| 80
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm/4x4c2-dq-sse2.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <immintrin.h>
#include <qnnpack/q8gemm.h>
#include <requantization/runtime-sse2.h>
/*
 * 4x4c2 dynamically-quantized GEMM microkernel (SSE2): computes a 4-row by
 * 4-column tile of C = dequantize(A * W) + bias, where A holds uint8
 * activations (zero point subtracted on the fly) and `w` holds packed uint8
 * weights preceded by a 16-byte header that this kernel skips.
 * Output is float32: each int32 accumulator is scaled by the per-output-
 * channel multiplier and offset by the float bias `b`.
 *
 * mr/nr (<= 4 each) give the valid rows/columns of the tile; `k` is the
 * reduction length. "c2" layout: weights are packed 2 k-steps per column,
 * so each _mm_madd_epi16 consumes 2 k-elements per lane.
 *
 * NOTE(review): the exact packing of `w` (16-byte header, 8 bytes per
 * 2-k-step group of 4 columns) is assumed from the pointer arithmetic
 * below -- confirm against the corresponding packing routine.
 */
void pytorch_q8gemm_dq_ukernel_4x4c2__sse2(
    size_t mr,
    size_t nr,
    size_t k,
    const uint8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    const float* restrict b,
    float* restrict c,
    size_t c_stride,
    size_t output_channel_index,
    const struct pytorch_qnnp_conv_dynamic_quantization_params
        quantization_params[RESTRICT_STATIC 1]) {
  /* Dynamic quantization: accumulators start at zero (bias added in float
   * at the end, unlike the requantizing variant which preloads bias). */
  __m128i vacc0x0123 = _mm_setzero_si128();
  __m128i vacc1x0123 = _mm_setzero_si128();
  __m128i vacc2x0123 = _mm_setzero_si128();
  __m128i vacc3x0123 = _mm_setzero_si128();
  w = (const void*)((uintptr_t)w + 16);
  /* Row pointers; rows beyond mr alias the previous row so the kernel
   * always processes 4 rows (extra results are discarded at store time). */
  const uint8_t* a0 = a;
  const uint8_t* a1 = (const uint8_t*)((uintptr_t)a0 + a_stride);
  if (mr < 2) {
    a1 = a0;
  }
  const uint8_t* a2 = (const uint8_t*)((uintptr_t)a1 + a_stride);
  if (mr <= 2) {
    a2 = a1;
  }
  const uint8_t* a3 = (const uint8_t*)((uintptr_t)a2 + a_stride);
  if (mr != 4) {
    a3 = a2;
  }
  const __m128i va_zero_point = _mm_set1_epi16(quantization_params->input_zero_point);
  /* Per-channel kernel zero points for the 4 output channels of this tile,
   * replicated pairwise to match the c2 interleaved weight layout. */
  const int16_t vb_zero_point_0 =
    (int16_t)(uint16_t)quantization_params->kernel_zero_points[
    output_channel_index];
  const int16_t vb_zero_point_1 =
    (int16_t)(uint16_t)quantization_params->kernel_zero_points[
    output_channel_index + 1];
  const int16_t vb_zero_point_2 =
    (int16_t)(uint16_t)quantization_params->kernel_zero_points[
    output_channel_index + 2];
  const int16_t vb_zero_point_3 =
    (int16_t)(uint16_t)quantization_params->kernel_zero_points[
    output_channel_index + 3];
  __m128i vb_zero_point = _mm_set_epi16(vb_zero_point_3,
                                        vb_zero_point_3,
                                        vb_zero_point_2,
                                        vb_zero_point_2,
                                        vb_zero_point_1,
                                        vb_zero_point_1,
                                        vb_zero_point_0,
                                        vb_zero_point_0
                                        );
  const __m128 vmultiplier =
      _mm_loadu_ps(&quantization_params->multipliers[output_channel_index]);
  const __m128 vbias = _mm_load_ps(b);
  const __m128i vzero = _mm_setzero_si128();
  /*
   * Main loop: 8 k-elements per iteration. Each iteration loads 8 bytes
   * per row, widens to int16 and subtracts the input zero point, then does
   * 4 madd steps (2 k-elements each) against 4 consecutive 8-byte weight
   * groups, broadcasting the matching 32-bit activation pair via shuffle.
   */
  for (; k >= 8; k -= 8) {
    const __m128i va0 = _mm_loadl_epi64((const __m128i*)a0);
    const __m128i vxa0 =
        sub_zero_point(_mm_unpacklo_epi8(va0, vzero), va_zero_point);
    a0 += 8;
    const __m128i va1 = _mm_loadl_epi64((const __m128i*)a1);
    const __m128i vxa1 =
        sub_zero_point(_mm_unpacklo_epi8(va1, vzero), va_zero_point);
    a1 += 8;
    const __m128i va2 = _mm_loadl_epi64((const __m128i*)a2);
    const __m128i vxa2 =
        sub_zero_point(_mm_unpacklo_epi8(va2, vzero), va_zero_point);
    a2 += 8;
    const __m128i va3 = _mm_loadl_epi64((const __m128i*)a3);
    const __m128i vxa3 =
        sub_zero_point(_mm_unpacklo_epi8(va3, vzero), va_zero_point);
    a3 += 8;
    const __m128i vb0 = _mm_loadl_epi64((const __m128i*)w);
    const __m128i vxb0 =
        _mm_sub_epi16(_mm_unpacklo_epi8(vb0, vzero), vb_zero_point);
    vacc0x0123 = _mm_add_epi32(
        vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
    vacc1x0123 = _mm_add_epi32(
        vacc1x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
    vacc2x0123 = _mm_add_epi32(
        vacc2x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
    vacc3x0123 = _mm_add_epi32(
        vacc3x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
    const __m128i vb1 = _mm_loadl_epi64((const __m128i*)((uintptr_t)w + 8));
    const __m128i vxb1 =
        _mm_sub_epi16(_mm_unpacklo_epi8(vb1, vzero), vb_zero_point);
    vacc0x0123 = _mm_add_epi32(
        vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
    vacc1x0123 = _mm_add_epi32(
        vacc1x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
    vacc2x0123 = _mm_add_epi32(
        vacc2x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
    vacc3x0123 = _mm_add_epi32(
        vacc3x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
    const __m128i vb2 = _mm_loadl_epi64((const __m128i*)((uintptr_t)w + 16));
    const __m128i vxb2 =
        _mm_sub_epi16(_mm_unpacklo_epi8(vb2, vzero), vb_zero_point);
    vacc0x0123 = _mm_add_epi32(
        vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
    vacc1x0123 = _mm_add_epi32(
        vacc1x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
    vacc2x0123 = _mm_add_epi32(
        vacc2x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
    vacc3x0123 = _mm_add_epi32(
        vacc3x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
    const __m128i vb3 = _mm_loadl_epi64((const __m128i*)((uintptr_t)w + 24));
    const __m128i vxb3 =
        _mm_sub_epi16(_mm_unpacklo_epi8(vb3, vzero), vb_zero_point);
    w = (const void*)((uintptr_t)w + 32);
    vacc0x0123 = _mm_add_epi32(
        vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
    vacc1x0123 = _mm_add_epi32(
        vacc1x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
    vacc2x0123 = _mm_add_epi32(
        vacc2x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
    vacc3x0123 = _mm_add_epi32(
        vacc3x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
  }
  /*
   * Remainder (1..7 k-elements): re-read the last 8 activation bytes and
   * shift out the already-consumed low bytes, so no out-of-bounds read
   * occurs past the row end. Weight groups are consumed only for the
   * k-steps actually present (k>2 / k>4 / k>6 ladders).
   */
  if (k != 0) {
    const size_t a_predecrement = 8 - k;
    const __m128i va_shift = _mm_cvtsi32_si128(8 * a_predecrement);
    const __m128i va0 = _mm_srl_epi64(
        _mm_loadl_epi64((const __m128i*)(a0 - a_predecrement)), va_shift);
    const __m128i vxa0 =
        sub_zero_point(_mm_unpacklo_epi8(va0, vzero), va_zero_point);
    const __m128i va1 = _mm_srl_epi64(
        _mm_loadl_epi64((const __m128i*)(a1 - a_predecrement)), va_shift);
    const __m128i vxa1 =
        sub_zero_point(_mm_unpacklo_epi8(va1, vzero), va_zero_point);
    const __m128i va2 = _mm_srl_epi64(
        _mm_loadl_epi64((const __m128i*)(a2 - a_predecrement)), va_shift);
    const __m128i vxa2 =
        sub_zero_point(_mm_unpacklo_epi8(va2, vzero), va_zero_point);
    const __m128i va3 = _mm_srl_epi64(
        _mm_loadl_epi64((const __m128i*)(a3 - a_predecrement)), va_shift);
    const __m128i vxa3 =
        sub_zero_point(_mm_unpacklo_epi8(va3, vzero), va_zero_point);
    const __m128i vb0 = _mm_loadl_epi64((const __m128i*)w);
    const __m128i vxb0 =
        _mm_sub_epi16(_mm_unpacklo_epi8(vb0, vzero), vb_zero_point);
    vacc0x0123 = _mm_add_epi32(
        vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
    vacc1x0123 = _mm_add_epi32(
        vacc1x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
    vacc2x0123 = _mm_add_epi32(
        vacc2x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
    vacc3x0123 = _mm_add_epi32(
        vacc3x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
    if (k > 2) {
      const __m128i vb1 = _mm_loadl_epi64((const __m128i*)((uintptr_t)w + 8));
      const __m128i vxb1 =
          _mm_sub_epi16(_mm_unpacklo_epi8(vb1, vzero), vb_zero_point);
      vacc0x0123 = _mm_add_epi32(
          vacc0x0123,
          _mm_madd_epi16(
              _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
      vacc1x0123 = _mm_add_epi32(
          vacc1x0123,
          _mm_madd_epi16(
              _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
      vacc2x0123 = _mm_add_epi32(
          vacc2x0123,
          _mm_madd_epi16(
              _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
      vacc3x0123 = _mm_add_epi32(
          vacc3x0123,
          _mm_madd_epi16(
              _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
      if (k > 4) {
        const __m128i vb2 =
            _mm_loadl_epi64((const __m128i*)((uintptr_t)w + 16));
        const __m128i vxb2 =
            _mm_sub_epi16(_mm_unpacklo_epi8(vb2, vzero), vb_zero_point);
        vacc0x0123 = _mm_add_epi32(
            vacc0x0123,
            _mm_madd_epi16(
                _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
        vacc1x0123 = _mm_add_epi32(
            vacc1x0123,
            _mm_madd_epi16(
                _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
        vacc2x0123 = _mm_add_epi32(
            vacc2x0123,
            _mm_madd_epi16(
                _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
        vacc3x0123 = _mm_add_epi32(
            vacc3x0123,
            _mm_madd_epi16(
                _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
        if (k > 6) {
          const __m128i vb3 =
              _mm_loadl_epi64((const __m128i*)((uintptr_t)w + 24));
          const __m128i vxb3 =
              _mm_sub_epi16(_mm_unpacklo_epi8(vb3, vzero), vb_zero_point);
          vacc0x0123 = _mm_add_epi32(
              vacc0x0123,
              _mm_madd_epi16(
                  _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
          vacc1x0123 = _mm_add_epi32(
              vacc1x0123,
              _mm_madd_epi16(
                  _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
          vacc2x0123 = _mm_add_epi32(
              vacc2x0123,
              _mm_madd_epi16(
                  _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
          vacc3x0123 = _mm_add_epi32(
              vacc3x0123,
              _mm_madd_epi16(
                  _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
        }
      }
    }
  }
  /* Dequantize: int32 accumulators -> float, per-channel scale, add bias. */
  __m128 vout0 = _mm_mul_ps(vmultiplier, _mm_cvtepi32_ps(vacc0x0123));
  __m128 vout1 = _mm_mul_ps(vmultiplier, _mm_cvtepi32_ps(vacc1x0123));
  __m128 vout2 = _mm_mul_ps(vmultiplier, _mm_cvtepi32_ps(vacc2x0123));
  __m128 vout3 = _mm_mul_ps(vmultiplier, _mm_cvtepi32_ps(vacc3x0123));
  vout0 = _mm_add_ps(vout0, vbias);
  vout1 = _mm_add_ps(vout1, vbias);
  vout2 = _mm_add_ps(vout2, vbias);
  vout3 = _mm_add_ps(vout3, vbias);
  /* Output row pointers, clamped the same way as the input rows so stores
   * for rows beyond mr overwrite a valid row with identical data. */
  float* c0 = c;
  float* c1 = c0 + c_stride;
  if (mr < 2) {
    c1 = c0;
  }
  float* c2 = c1 + c_stride;
  if (mr <= 2) {
    c2 = c1;
  }
  float* c3 = c2 + c_stride;
  if (mr != 4) {
    c3 = c2;
  }
  /* Store nr columns: full 4-wide store, else 2-then-1 tail stores. */
  if (nr == 4) {
    _mm_storeu_ps(c0, vout0);
    _mm_storeu_ps(c1, vout1);
    _mm_storeu_ps(c2, vout2);
    _mm_storeu_ps(c3, vout3);
  } else {
    if (nr >= 2) {
      _mm_storel_pi((__m64*)c0, vout0);
      _mm_storel_pi((__m64*)c1, vout1);
      _mm_storel_pi((__m64*)c2, vout2);
      _mm_storel_pi((__m64*)c3, vout3);
      c0 += 2;
      vout0 = _mm_shuffle_ps(vout0, vout0, _MM_SHUFFLE(2, 2, 2, 2));
      c1 += 2;
      vout1 = _mm_shuffle_ps(vout1, vout1, _MM_SHUFFLE(2, 2, 2, 2));
      c2 += 2;
      vout2 = _mm_shuffle_ps(vout2, vout2, _MM_SHUFFLE(2, 2, 2, 2));
      c3 += 2;
      vout3 = _mm_shuffle_ps(vout3, vout3, _MM_SHUFFLE(2, 2, 2, 2));
      nr -= 2;
    }
    if (nr != 0) {
      *c0 = _mm_cvtss_f32(vout0);
      *c1 = _mm_cvtss_f32(vout1);
      *c2 = _mm_cvtss_f32(vout2);
      *c3 = _mm_cvtss_f32(vout3);
    }
  }
}
| 11,857
| 35.042553
| 86
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm/4x4c2-sse2.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <immintrin.h>
#include <qnnpack/q8gemm.h>
#include <requantization/runtime-sse2.h>
/*
 * 4x4c2 requantizing GEMM microkernel (SSE2): computes a 4-row by 4-column
 * tile of C = requantize(A * W), with uint8 activations/weights (zero
 * points subtracted on the fly) and uint8 output. Unlike the "dq" variant,
 * bias is preloaded from the first 16 bytes of the packed weights into the
 * int32 accumulators, and the result is scaled, offset by the output zero
 * point, and saturated to the [output_min, output_max] range.
 *
 * mr/nr (<= 4 each) give the valid rows/columns of the tile; `k` is the
 * reduction length. "c2" layout: each 8-byte weight group holds 2
 * k-elements for each of the 4 output channels, consumed by one
 * _mm_madd_epi16 per group.
 */
void pytorch_q8gemm_ukernel_4x4c2__sse2(
    size_t mr,
    size_t nr,
    size_t k,
    const uint8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    uint8_t* restrict c,
    size_t c_stride,
    size_t output_channel_index,
    const union pytorch_qnnp_conv_quantization_params
        quantization_params[RESTRICT_STATIC 1]) {
  /* First 16 bytes of `w` are the 4 int32 biases; replicate into all rows. */
  __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*)w);
  __m128i vacc1x0123 = vacc0x0123;
  __m128i vacc2x0123 = vacc0x0123;
  __m128i vacc3x0123 = vacc0x0123;
  w = (const void*)((uintptr_t)w + 16);
  /* Row pointers; rows beyond mr alias the previous row so the kernel
   * always processes 4 rows (extra results are discarded at store time). */
  const uint8_t* a0 = a;
  const uint8_t* a1 = (const uint8_t*)((uintptr_t)a0 + a_stride);
  if (mr < 2) {
    a1 = a0;
  }
  const uint8_t* a2 = (const uint8_t*)((uintptr_t)a1 + a_stride);
  if (mr <= 2) {
    a2 = a1;
  }
  const uint8_t* a3 = (const uint8_t*)((uintptr_t)a2 + a_stride);
  if (mr != 4) {
    a3 = a2;
  }
  const __m128i va_zero_point = _mm_load_si128(
      (const __m128i*)quantization_params->sse2.input_zero_point);
  /* Per-channel kernel zero points for the 4 output channels of this tile,
   * replicated pairwise to match the c2 interleaved weight layout. */
  const int16_t vb_zero_point_0 =
    (int16_t)(uint16_t)quantization_params->sse2.kernel_zero_points[
    output_channel_index];
  const int16_t vb_zero_point_1 =
    (int16_t)(uint16_t)quantization_params->sse2.kernel_zero_points[
    output_channel_index + 1];
  const int16_t vb_zero_point_2 =
    (int16_t)(uint16_t)quantization_params->sse2.kernel_zero_points[
    output_channel_index + 2];
  const int16_t vb_zero_point_3 =
    (int16_t)(uint16_t)quantization_params->sse2.kernel_zero_points[
    output_channel_index + 3];
  __m128i vb_zero_point = _mm_set_epi16(vb_zero_point_3,
                                        vb_zero_point_3,
                                        vb_zero_point_2,
                                        vb_zero_point_2,
                                        vb_zero_point_1,
                                        vb_zero_point_1,
                                        vb_zero_point_0,
                                        vb_zero_point_0
                                        );
  const __m128i vzero = _mm_setzero_si128();
  /*
   * Main loop: 8 k-elements per iteration. Loads 8 bytes per row, widens
   * to int16 and subtracts the input zero point, then does 4 madd steps
   * (2 k-elements each) against 4 consecutive 8-byte weight groups,
   * broadcasting the matching 32-bit activation pair via shuffle.
   */
  for (; k >= 8; k -= 8) {
    const __m128i va0 = _mm_loadl_epi64((const __m128i*)a0);
    const __m128i vxa0 =
        sub_zero_point(_mm_unpacklo_epi8(va0, vzero), va_zero_point);
    a0 += 8;
    const __m128i va1 = _mm_loadl_epi64((const __m128i*)a1);
    const __m128i vxa1 =
        sub_zero_point(_mm_unpacklo_epi8(va1, vzero), va_zero_point);
    a1 += 8;
    const __m128i va2 = _mm_loadl_epi64((const __m128i*)a2);
    const __m128i vxa2 =
        sub_zero_point(_mm_unpacklo_epi8(va2, vzero), va_zero_point);
    a2 += 8;
    const __m128i va3 = _mm_loadl_epi64((const __m128i*)a3);
    const __m128i vxa3 =
        sub_zero_point(_mm_unpacklo_epi8(va3, vzero), va_zero_point);
    a3 += 8;
    const __m128i vb0 = _mm_loadl_epi64((const __m128i*)w);
    const __m128i vxb0 =
        _mm_sub_epi16(_mm_unpacklo_epi8(vb0, vzero), vb_zero_point);
    vacc0x0123 = _mm_add_epi32(
        vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
    vacc1x0123 = _mm_add_epi32(
        vacc1x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
    vacc2x0123 = _mm_add_epi32(
        vacc2x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
    vacc3x0123 = _mm_add_epi32(
        vacc3x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
    const __m128i vb1 = _mm_loadl_epi64((const __m128i*)((uintptr_t)w + 8));
    const __m128i vxb1 =
        _mm_sub_epi16(_mm_unpacklo_epi8(vb1, vzero), vb_zero_point);
    vacc0x0123 = _mm_add_epi32(
        vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
    vacc1x0123 = _mm_add_epi32(
        vacc1x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
    vacc2x0123 = _mm_add_epi32(
        vacc2x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
    vacc3x0123 = _mm_add_epi32(
        vacc3x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
    const __m128i vb2 = _mm_loadl_epi64((const __m128i*)((uintptr_t)w + 16));
    const __m128i vxb2 =
        _mm_sub_epi16(_mm_unpacklo_epi8(vb2, vzero), vb_zero_point);
    vacc0x0123 = _mm_add_epi32(
        vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
    vacc1x0123 = _mm_add_epi32(
        vacc1x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
    vacc2x0123 = _mm_add_epi32(
        vacc2x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
    vacc3x0123 = _mm_add_epi32(
        vacc3x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
    const __m128i vb3 = _mm_loadl_epi64((const __m128i*)((uintptr_t)w + 24));
    const __m128i vxb3 =
        _mm_sub_epi16(_mm_unpacklo_epi8(vb3, vzero), vb_zero_point);
    w = (const void*)((uintptr_t)w + 32);
    vacc0x0123 = _mm_add_epi32(
        vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
    vacc1x0123 = _mm_add_epi32(
        vacc1x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
    vacc2x0123 = _mm_add_epi32(
        vacc2x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
    vacc3x0123 = _mm_add_epi32(
        vacc3x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
  }
  /*
   * Remainder (1..7 k-elements): re-read the last 8 activation bytes and
   * shift out the already-consumed low bytes, so no out-of-bounds read
   * occurs past the row end. Weight groups are consumed only for the
   * k-steps actually present (k>2 / k>4 / k>6 ladders).
   */
  if (k != 0) {
    const size_t a_predecrement = 8 - k;
    const __m128i va_shift = _mm_cvtsi32_si128(8 * a_predecrement);
    const __m128i va0 = _mm_srl_epi64(
        _mm_loadl_epi64((const __m128i*)(a0 - a_predecrement)), va_shift);
    const __m128i vxa0 =
        sub_zero_point(_mm_unpacklo_epi8(va0, vzero), va_zero_point);
    const __m128i va1 = _mm_srl_epi64(
        _mm_loadl_epi64((const __m128i*)(a1 - a_predecrement)), va_shift);
    const __m128i vxa1 =
        sub_zero_point(_mm_unpacklo_epi8(va1, vzero), va_zero_point);
    const __m128i va2 = _mm_srl_epi64(
        _mm_loadl_epi64((const __m128i*)(a2 - a_predecrement)), va_shift);
    const __m128i vxa2 =
        sub_zero_point(_mm_unpacklo_epi8(va2, vzero), va_zero_point);
    const __m128i va3 = _mm_srl_epi64(
        _mm_loadl_epi64((const __m128i*)(a3 - a_predecrement)), va_shift);
    const __m128i vxa3 =
        sub_zero_point(_mm_unpacklo_epi8(va3, vzero), va_zero_point);
    const __m128i vb0 = _mm_loadl_epi64((const __m128i*)w);
    const __m128i vxb0 =
        _mm_sub_epi16(_mm_unpacklo_epi8(vb0, vzero), vb_zero_point);
    vacc0x0123 = _mm_add_epi32(
        vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
    vacc1x0123 = _mm_add_epi32(
        vacc1x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
    vacc2x0123 = _mm_add_epi32(
        vacc2x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
    vacc3x0123 = _mm_add_epi32(
        vacc3x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
    if (k > 2) {
      const __m128i vb1 = _mm_loadl_epi64((const __m128i*)((uintptr_t)w + 8));
      const __m128i vxb1 =
          _mm_sub_epi16(_mm_unpacklo_epi8(vb1, vzero), vb_zero_point);
      vacc0x0123 = _mm_add_epi32(
          vacc0x0123,
          _mm_madd_epi16(
              _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
      vacc1x0123 = _mm_add_epi32(
          vacc1x0123,
          _mm_madd_epi16(
              _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
      vacc2x0123 = _mm_add_epi32(
          vacc2x0123,
          _mm_madd_epi16(
              _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
      vacc3x0123 = _mm_add_epi32(
          vacc3x0123,
          _mm_madd_epi16(
              _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
      if (k > 4) {
        const __m128i vb2 =
            _mm_loadl_epi64((const __m128i*)((uintptr_t)w + 16));
        const __m128i vxb2 =
            _mm_sub_epi16(_mm_unpacklo_epi8(vb2, vzero), vb_zero_point);
        vacc0x0123 = _mm_add_epi32(
            vacc0x0123,
            _mm_madd_epi16(
                _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
        vacc1x0123 = _mm_add_epi32(
            vacc1x0123,
            _mm_madd_epi16(
                _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
        vacc2x0123 = _mm_add_epi32(
            vacc2x0123,
            _mm_madd_epi16(
                _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
        vacc3x0123 = _mm_add_epi32(
            vacc3x0123,
            _mm_madd_epi16(
                _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
        if (k > 6) {
          const __m128i vb3 =
              _mm_loadl_epi64((const __m128i*)((uintptr_t)w + 24));
          const __m128i vxb3 =
              _mm_sub_epi16(_mm_unpacklo_epi8(vb3, vzero), vb_zero_point);
          vacc0x0123 = _mm_add_epi32(
              vacc0x0123,
              _mm_madd_epi16(
                  _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
          vacc1x0123 = _mm_add_epi32(
              vacc1x0123,
              _mm_madd_epi16(
                  _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
          vacc2x0123 = _mm_add_epi32(
              vacc2x0123,
              _mm_madd_epi16(
                  _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
          vacc3x0123 = _mm_add_epi32(
              vacc3x0123,
              _mm_madd_epi16(
                  _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
        }
      }
    }
  }
  /* Requantize: scale each accumulator by the per-channel float
   * requantization scale and round back to int32. */
  const __m128 vmultiplier =
      _mm_loadu_ps(&quantization_params->sse2.requantization_scales[output_channel_index]);
  vacc0x0123 = _mm_cvtps_epi32(
                _mm_mul_ps(
                  _mm_cvtepi32_ps(vacc0x0123),
                  vmultiplier
                  )
                );
  vacc1x0123 = _mm_cvtps_epi32(
                _mm_mul_ps(
                  _mm_cvtepi32_ps(vacc1x0123),
                  vmultiplier
                  )
                );
  vacc2x0123 = _mm_cvtps_epi32(
                _mm_mul_ps(
                  _mm_cvtepi32_ps(vacc2x0123),
                  vmultiplier
                  )
                );
  vacc3x0123 = _mm_cvtps_epi32(
                _mm_mul_ps(
                  _mm_cvtepi32_ps(vacc3x0123),
                  vmultiplier
                  )
                );
  /* Add the output zero point with int16 saturation, pack all 4 rows to
   * uint8 (row i occupies bytes 4i..4i+3 of vout), then clamp. */
  const __m128i voutput_zero_point = _mm_load_si128(
      (const __m128i*)quantization_params->sse2.output_zero_point);
  const __m128i vacc01x0123 = _mm_adds_epi16(
      _mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
  const __m128i vacc23x0123 = _mm_adds_epi16(
      _mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point);
  __m128i vout = _mm_packus_epi16(vacc01x0123, vacc23x0123);
  vout = _mm_min_epu8(
      vout,
      _mm_load_si128((const __m128i*)quantization_params->sse2.output_max));
  vout = _mm_max_epu8(
      vout,
      _mm_load_si128((const __m128i*)quantization_params->sse2.output_min));
  /* Output row pointers, clamped the same way as the input rows so stores
   * for rows beyond mr overwrite a valid row with identical data. */
  uint8_t* c0 = c;
  uint8_t* c1 = (uint8_t*)((uintptr_t)c0 + c_stride);
  if (mr < 2) {
    c1 = c0;
  }
  uint8_t* c2 = (uint8_t*)((uintptr_t)c1 + c_stride);
  if (mr <= 2) {
    c2 = c1;
  }
  uint8_t* c3 = (uint8_t*)((uintptr_t)c2 + c_stride);
  if (mr != 4) {
    c3 = c2;
  }
  /* Store nr columns per row: full 4-byte stores, else 2-then-1 tail. */
  if (nr == 4) {
    *((uint32_t*)c0) = (uint32_t)_mm_cvtsi128_si32(vout);
    *((uint32_t*)c1) = (uint32_t)_mm_cvtsi128_si32(_mm_srli_epi64(vout, 32));
    *((uint32_t*)c2) =
        (uint32_t)_mm_cvtsi128_si32(_mm_unpackhi_epi32(vout, vout));
    *((uint32_t*)c3) = (uint32_t)_mm_cvtsi128_si32(_mm_srli_si128(vout, 12));
  } else {
    typedef PYTORCH_QNNP_UNALIGNED uint16_t unaligned_uint16_t;
    if (nr >= 2) {
      *((unaligned_uint16_t*)c0) = (uint16_t)_mm_extract_epi16(vout, 0);
      c0 += 2;
      *((unaligned_uint16_t*)c1) = (uint16_t)_mm_extract_epi16(vout, 2);
      c1 += 2;
      *((unaligned_uint16_t*)c2) = (uint16_t)_mm_extract_epi16(vout, 4);
      c2 += 2;
      *((unaligned_uint16_t*)c3) = (uint16_t)_mm_extract_epi16(vout, 6);
      c3 += 2;
      vout = _mm_srli_epi32(vout, 16);
      nr -= 2;
    }
    if (nr != 0) {
      *((uint8_t*)c0) = (uint8_t)_mm_cvtsi128_si32(vout);
      *((uint8_t*)c1) = (uint8_t)_mm_extract_epi16(vout, 2);
      *((uint8_t*)c2) = (uint8_t)_mm_extract_epi16(vout, 4);
      *((uint8_t*)c3) = (uint8_t)_mm_extract_epi16(vout, 6);
    }
  }
}
| 13,062
| 36.216524
| 91
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm/4x8-dq-neon.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <arm_neon.h>
#include <qnnpack/q8gemm.h>
#include <requantization/runtime-neon.h>
void pytorch_q8gemm_dq_ukernel_4x8__neon(
size_t mr,
size_t nr,
size_t k,
const uint8_t* restrict a,
size_t a_stride,
const void* restrict w,
const float* restrict b,
float* restrict c,
size_t c_stride,
size_t output_channel_index,
const struct pytorch_qnnp_conv_dynamic_quantization_params
quantization_params[RESTRICT_STATIC 1]) {
int32x4_t vacc0x0123 = {};
int32x4_t vacc0x4567 = {};
int32x4_t vacc1x0123 = {};
int32x4_t vacc1x4567 = {};
int32x4_t vacc2x0123 = {};
int32x4_t vacc2x4567 = {};
int32x4_t vacc3x0123 = {};
int32x4_t vacc3x4567 = {};
w = (const void*)((uintptr_t)w + 32);
const uint8_t* a0 = a;
const uint8_t* a1 = (const uint8_t*)((uintptr_t)a0 + a_stride);
if (mr < 2) {
a1 = a0;
}
const uint8_t* a2 = (const uint8_t*)((uintptr_t)a1 + a_stride);
if (mr <= 2) {
a2 = a1;
}
const uint8_t* a3 = (const uint8_t*)((uintptr_t)a2 + a_stride);
if (mr != 4) {
a3 = a2;
}
const uint8x8_t va_zero_point =
vld1_dup_u8((const uint8_t*)&quantization_params->input_zero_point);
// Assumes that kernel_zero_points is an array padded with necessary elements
// in order to make it multiple of 8.
const uint8x8_t vb_zero_point =
vld1_u8((const uint8_t*)&quantization_params->kernel_zero_points
[output_channel_index]);
const float32x4_t vmultiplier_c0123 =
vld1q_f32(&quantization_params->multipliers[output_channel_index]);
const float32x4_t vmultiplier_c4567 =
vld1q_f32(&quantization_params->multipliers[output_channel_index + 4]);
const float32x4_t vbias[] = {
vld1q_f32(b),
vld1q_f32(b + 4),
};
for (; k >= 8; k -= 8) {
const uint8x8_t va0 = vld1_u8(a0);
a0 += 8;
const int16x8_t vxa0 =
vreinterpretq_s16_u16(sub_zero_point(va0, va_zero_point));
const uint8x8_t va1 = vld1_u8(a1);
a1 += 8;
const int16x8_t vxa1 =
vreinterpretq_s16_u16(sub_zero_point(va1, va_zero_point));
const uint8x8_t va2 = vld1_u8(a2);
a2 += 8;
const int16x8_t vxa2 =
vreinterpretq_s16_u16(sub_zero_point(va2, va_zero_point));
const uint8x8_t va3 = vld1_u8(a3);
a3 += 8;
const int16x8_t vxa3 =
vreinterpretq_s16_u16(sub_zero_point(va3, va_zero_point));
const uint8x8_t vb01234567c0 = vld1_u8(w);
w = (const void*)((uintptr_t)w + 8);
const int16x8_t vxb01234567c0 =
vreinterpretq_s16_u16(vsubl_u8(vb01234567c0, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(
vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(
vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc1x0123 = vmlal_lane_s16(
vacc1x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa1), 0);
vacc1x4567 = vmlal_lane_s16(
vacc1x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa1), 0);
vacc2x0123 = vmlal_lane_s16(
vacc2x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa2), 0);
vacc2x4567 = vmlal_lane_s16(
vacc2x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa2), 0);
vacc3x0123 = vmlal_lane_s16(
vacc3x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa3), 0);
vacc3x4567 = vmlal_lane_s16(
vacc3x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa3), 0);
const uint8x8_t vb01234567c1 = vld1_u8(w);
w = (const void*)((uintptr_t)w + 8);
const int16x8_t vxb01234567c1 =
vreinterpretq_s16_u16(vsubl_u8(vb01234567c1, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(
vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(
vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc1x0123 = vmlal_lane_s16(
vacc1x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa1), 1);
vacc1x4567 = vmlal_lane_s16(
vacc1x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa1), 1);
vacc2x0123 = vmlal_lane_s16(
vacc2x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa2), 1);
vacc2x4567 = vmlal_lane_s16(
vacc2x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa2), 1);
vacc3x0123 = vmlal_lane_s16(
vacc3x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa3), 1);
vacc3x4567 = vmlal_lane_s16(
vacc3x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa3), 1);
const uint8x8_t vb01234567c2 = vld1_u8(w);
w = (const void*)((uintptr_t)w + 8);
const int16x8_t vxb01234567c2 =
vreinterpretq_s16_u16(vsubl_u8(vb01234567c2, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(
vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(
vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc1x0123 = vmlal_lane_s16(
vacc1x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa1), 2);
vacc1x4567 = vmlal_lane_s16(
vacc1x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa1), 2);
vacc2x0123 = vmlal_lane_s16(
vacc2x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa2), 2);
vacc2x4567 = vmlal_lane_s16(
vacc2x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa2), 2);
vacc3x0123 = vmlal_lane_s16(
vacc3x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa3), 2);
vacc3x4567 = vmlal_lane_s16(
vacc3x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa3), 2);
const uint8x8_t vb01234567c3 = vld1_u8(w);
w = (const void*)((uintptr_t)w + 8);
const int16x8_t vxb01234567c3 =
vreinterpretq_s16_u16(vsubl_u8(vb01234567c3, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(
vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(
vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc1x0123 = vmlal_lane_s16(
vacc1x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa1), 3);
vacc1x4567 = vmlal_lane_s16(
vacc1x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa1), 3);
vacc2x0123 = vmlal_lane_s16(
vacc2x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa2), 3);
vacc2x4567 = vmlal_lane_s16(
vacc2x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa2), 3);
vacc3x0123 = vmlal_lane_s16(
vacc3x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa3), 3);
vacc3x4567 = vmlal_lane_s16(
vacc3x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa3), 3);
const uint8x8_t vb01234567c4 = vld1_u8(w);
w = (const void*)((uintptr_t)w + 8);
const int16x8_t vxb01234567c4 =
vreinterpretq_s16_u16(vsubl_u8(vb01234567c4, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(
vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(
vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc1x0123 = vmlal_lane_s16(
vacc1x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa1), 0);
vacc1x4567 = vmlal_lane_s16(
vacc1x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa1), 0);
vacc2x0123 = vmlal_lane_s16(
vacc2x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa2), 0);
vacc2x4567 = vmlal_lane_s16(
vacc2x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa2), 0);
vacc3x0123 = vmlal_lane_s16(
vacc3x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa3), 0);
vacc3x4567 = vmlal_lane_s16(
vacc3x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa3), 0);
const uint8x8_t vb01234567c5 = vld1_u8(w);
w = (const void*)((uintptr_t)w + 8);
const int16x8_t vxb01234567c5 =
vreinterpretq_s16_u16(vsubl_u8(vb01234567c5, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(
vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(
vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc1x0123 = vmlal_lane_s16(
vacc1x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa1), 1);
vacc1x4567 = vmlal_lane_s16(
vacc1x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa1), 1);
vacc2x0123 = vmlal_lane_s16(
vacc2x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa2), 1);
vacc2x4567 = vmlal_lane_s16(
vacc2x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa2), 1);
vacc3x0123 = vmlal_lane_s16(
vacc3x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa3), 1);
vacc3x4567 = vmlal_lane_s16(
vacc3x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa3), 1);
const uint8x8_t vb01234567c6 = vld1_u8(w);
w = (const void*)((uintptr_t)w + 8);
const int16x8_t vxb01234567c6 =
vreinterpretq_s16_u16(vsubl_u8(vb01234567c6, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(
vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(
vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc1x0123 = vmlal_lane_s16(
vacc1x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa1), 2);
vacc1x4567 = vmlal_lane_s16(
vacc1x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa1), 2);
vacc2x0123 = vmlal_lane_s16(
vacc2x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa2), 2);
vacc2x4567 = vmlal_lane_s16(
vacc2x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa2), 2);
vacc3x0123 = vmlal_lane_s16(
vacc3x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa3), 2);
vacc3x4567 = vmlal_lane_s16(
vacc3x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa3), 2);
const uint8x8_t vb01234567c7 = vld1_u8(w);
w = (const void*)((uintptr_t)w + 8);
const int16x8_t vxb01234567c7 =
vreinterpretq_s16_u16(vsubl_u8(vb01234567c7, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(
vacc0x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(
vacc0x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
vacc1x0123 = vmlal_lane_s16(
vacc1x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa1), 3);
vacc1x4567 = vmlal_lane_s16(
vacc1x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa1), 3);
vacc2x0123 = vmlal_lane_s16(
vacc2x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa2), 3);
vacc2x4567 = vmlal_lane_s16(
vacc2x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa2), 3);
vacc3x0123 = vmlal_lane_s16(
vacc3x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa3), 3);
vacc3x4567 = vmlal_lane_s16(
vacc3x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa3), 3);
}
if (k != 0) {
const size_t a_predecrement = 8 - k;
const int64x1_t va_shift = vmov_n_s64(-8 * a_predecrement);
const uint8x8_t va0 = vreinterpret_u8_u64(
vshl_u64(vreinterpret_u64_u8(vld1_u8(a0 - a_predecrement)), va_shift));
const int16x8_t vxa0 =
vreinterpretq_s16_u16(sub_zero_point(va0, va_zero_point));
const uint8x8_t va1 = vreinterpret_u8_u64(
vshl_u64(vreinterpret_u64_u8(vld1_u8(a1 - a_predecrement)), va_shift));
const int16x8_t vxa1 =
vreinterpretq_s16_u16(sub_zero_point(va1, va_zero_point));
const uint8x8_t va2 = vreinterpret_u8_u64(
vshl_u64(vreinterpret_u64_u8(vld1_u8(a2 - a_predecrement)), va_shift));
const int16x8_t vxa2 =
vreinterpretq_s16_u16(sub_zero_point(va2, va_zero_point));
const uint8x8_t va3 = vreinterpret_u8_u64(
vshl_u64(vreinterpret_u64_u8(vld1_u8(a3 - a_predecrement)), va_shift));
const int16x8_t vxa3 =
vreinterpretq_s16_u16(sub_zero_point(va3, va_zero_point));
const uint8x8_t vb01234567c0 = vld1_u8(w);
w = (const void*)((uintptr_t)w + 8);
const int16x8_t vxb01234567c0 =
vreinterpretq_s16_u16(vsubl_u8(vb01234567c0, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(
vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(
vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc1x0123 = vmlal_lane_s16(
vacc1x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa1), 0);
vacc1x4567 = vmlal_lane_s16(
vacc1x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa1), 0);
vacc2x0123 = vmlal_lane_s16(
vacc2x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa2), 0);
vacc2x4567 = vmlal_lane_s16(
vacc2x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa2), 0);
vacc3x0123 = vmlal_lane_s16(
vacc3x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa3), 0);
vacc3x4567 = vmlal_lane_s16(
vacc3x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa3), 0);
if (k >= 2) {
const uint8x8_t vb01234567c1 = vld1_u8(w);
w = (const void*)((uintptr_t)w + 8);
const int16x8_t vxb01234567c1 =
vreinterpretq_s16_u16(vsubl_u8(vb01234567c1, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(
vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(
vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc1x0123 = vmlal_lane_s16(
vacc1x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa1), 1);
vacc1x4567 = vmlal_lane_s16(
vacc1x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa1), 1);
vacc2x0123 = vmlal_lane_s16(
vacc2x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa2), 1);
vacc2x4567 = vmlal_lane_s16(
vacc2x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa2), 1);
vacc3x0123 = vmlal_lane_s16(
vacc3x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa3), 1);
vacc3x4567 = vmlal_lane_s16(
vacc3x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa3), 1);
if (k >= 3) {
const uint8x8_t vb01234567c2 = vld1_u8(w);
w = (const void*)((uintptr_t)w + 8);
const int16x8_t vxb01234567c2 =
vreinterpretq_s16_u16(vsubl_u8(vb01234567c2, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(
vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(
vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc1x0123 = vmlal_lane_s16(
vacc1x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa1), 2);
vacc1x4567 = vmlal_lane_s16(
vacc1x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa1), 2);
vacc2x0123 = vmlal_lane_s16(
vacc2x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa2), 2);
vacc2x4567 = vmlal_lane_s16(
vacc2x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa2), 2);
vacc3x0123 = vmlal_lane_s16(
vacc3x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa3), 2);
vacc3x4567 = vmlal_lane_s16(
vacc3x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa3), 2);
if (k >= 4) {
const uint8x8_t vb01234567c3 = vld1_u8(w);
w = (const void*)((uintptr_t)w + 8);
const int16x8_t vxb01234567c3 =
vreinterpretq_s16_u16(vsubl_u8(vb01234567c3, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(
vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(
vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc1x0123 = vmlal_lane_s16(
vacc1x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa1), 3);
vacc1x4567 = vmlal_lane_s16(
vacc1x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa1), 3);
vacc2x0123 = vmlal_lane_s16(
vacc2x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa2), 3);
vacc2x4567 = vmlal_lane_s16(
vacc2x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa2), 3);
vacc3x0123 = vmlal_lane_s16(
vacc3x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa3), 3);
vacc3x4567 = vmlal_lane_s16(
vacc3x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa3), 3);
if (k >= 5) {
const uint8x8_t vb01234567c4 = vld1_u8(w);
w = (const void*)((uintptr_t)w + 8);
const int16x8_t vxb01234567c4 =
vreinterpretq_s16_u16(vsubl_u8(vb01234567c4, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(
vacc0x0123,
vget_low_s16(vxb01234567c4),
vget_high_s16(vxa0),
0);
vacc0x4567 = vmlal_lane_s16(
vacc0x4567,
vget_high_s16(vxb01234567c4),
vget_high_s16(vxa0),
0);
vacc1x0123 = vmlal_lane_s16(
vacc1x0123,
vget_low_s16(vxb01234567c4),
vget_high_s16(vxa1),
0);
vacc1x4567 = vmlal_lane_s16(
vacc1x4567,
vget_high_s16(vxb01234567c4),
vget_high_s16(vxa1),
0);
vacc2x0123 = vmlal_lane_s16(
vacc2x0123,
vget_low_s16(vxb01234567c4),
vget_high_s16(vxa2),
0);
vacc2x4567 = vmlal_lane_s16(
vacc2x4567,
vget_high_s16(vxb01234567c4),
vget_high_s16(vxa2),
0);
vacc3x0123 = vmlal_lane_s16(
vacc3x0123,
vget_low_s16(vxb01234567c4),
vget_high_s16(vxa3),
0);
vacc3x4567 = vmlal_lane_s16(
vacc3x4567,
vget_high_s16(vxb01234567c4),
vget_high_s16(vxa3),
0);
if (k >= 6) {
const uint8x8_t vb01234567c5 = vld1_u8(w);
w = (const void*)((uintptr_t)w + 8);
const int16x8_t vxb01234567c5 =
vreinterpretq_s16_u16(vsubl_u8(vb01234567c5, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(
vacc0x0123,
vget_low_s16(vxb01234567c5),
vget_high_s16(vxa0),
1);
vacc0x4567 = vmlal_lane_s16(
vacc0x4567,
vget_high_s16(vxb01234567c5),
vget_high_s16(vxa0),
1);
vacc1x0123 = vmlal_lane_s16(
vacc1x0123,
vget_low_s16(vxb01234567c5),
vget_high_s16(vxa1),
1);
vacc1x4567 = vmlal_lane_s16(
vacc1x4567,
vget_high_s16(vxb01234567c5),
vget_high_s16(vxa1),
1);
vacc2x0123 = vmlal_lane_s16(
vacc2x0123,
vget_low_s16(vxb01234567c5),
vget_high_s16(vxa2),
1);
vacc2x4567 = vmlal_lane_s16(
vacc2x4567,
vget_high_s16(vxb01234567c5),
vget_high_s16(vxa2),
1);
vacc3x0123 = vmlal_lane_s16(
vacc3x0123,
vget_low_s16(vxb01234567c5),
vget_high_s16(vxa3),
1);
vacc3x4567 = vmlal_lane_s16(
vacc3x4567,
vget_high_s16(vxb01234567c5),
vget_high_s16(vxa3),
1);
if (k >= 7) {
const uint8x8_t vb01234567c6 = vld1_u8(w);
w = (const void*)((uintptr_t)w + 8);
const int16x8_t vxb01234567c6 = vreinterpretq_s16_u16(
vsubl_u8(vb01234567c6, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(
vacc0x0123,
vget_low_s16(vxb01234567c6),
vget_high_s16(vxa0),
2);
vacc0x4567 = vmlal_lane_s16(
vacc0x4567,
vget_high_s16(vxb01234567c6),
vget_high_s16(vxa0),
2);
vacc1x0123 = vmlal_lane_s16(
vacc1x0123,
vget_low_s16(vxb01234567c6),
vget_high_s16(vxa1),
2);
vacc1x4567 = vmlal_lane_s16(
vacc1x4567,
vget_high_s16(vxb01234567c6),
vget_high_s16(vxa1),
2);
vacc2x0123 = vmlal_lane_s16(
vacc2x0123,
vget_low_s16(vxb01234567c6),
vget_high_s16(vxa2),
2);
vacc2x4567 = vmlal_lane_s16(
vacc2x4567,
vget_high_s16(vxb01234567c6),
vget_high_s16(vxa2),
2);
vacc3x0123 = vmlal_lane_s16(
vacc3x0123,
vget_low_s16(vxb01234567c6),
vget_high_s16(vxa3),
2);
vacc3x4567 = vmlal_lane_s16(
vacc3x4567,
vget_high_s16(vxb01234567c6),
vget_high_s16(vxa3),
2);
}
}
}
}
}
}
}
float32x4_t vout0[] = {
vaddq_f32(vmulq_f32(vmultiplier_c0123, vcvtq_f32_s32(vacc0x0123)), vbias[0]),
vaddq_f32(vmulq_f32(vmultiplier_c4567, vcvtq_f32_s32(vacc0x4567)), vbias[1]),
};
float32x4_t vout1[] = {
vaddq_f32(vmulq_f32(vmultiplier_c0123, vcvtq_f32_s32(vacc1x0123)), vbias[0]),
vaddq_f32(vmulq_f32(vmultiplier_c4567, vcvtq_f32_s32(vacc1x4567)), vbias[1]),
};
float32x4_t vout2[] = {
vaddq_f32(vmulq_f32(vmultiplier_c0123, vcvtq_f32_s32(vacc2x0123)), vbias[0]),
vaddq_f32(vmulq_f32(vmultiplier_c4567, vcvtq_f32_s32(vacc2x4567)), vbias[1]),
};
float32x4_t vout3[] = {
vaddq_f32(vmulq_f32(vmultiplier_c0123, vcvtq_f32_s32(vacc3x0123)), vbias[0]),
vaddq_f32(vmulq_f32(vmultiplier_c4567, vcvtq_f32_s32(vacc3x4567)), vbias[1]),
};
float32x4_t * vout0_ptr = vout0;
float32x4_t * vout1_ptr = vout1;
float32x4_t * vout2_ptr = vout2;
float32x4_t * vout3_ptr = vout3;
float* c0 = c;
float* c1 = c0 + c_stride;
if (mr < 2) {
c1 = c0;
}
float* c2 = c1 + c_stride;
if (mr <= 2) {
c2 = c1;
}
float* c3 = c2 + c_stride;
if (mr != 4) {
c3 = c2;
}
for (; nr >= 4; nr -= 4) {
vst1q_f32(c0, *vout0_ptr++);
vst1q_f32(c1, *vout1_ptr++);
vst1q_f32(c2, *vout2_ptr++);
vst1q_f32(c3, *vout3_ptr++);
c0 += 4;
c1 += 4;
c2 += 4;
c3 += 4;
}
if (nr >= 2) {
vst1_f32(c0, vget_low_f32(*vout0_ptr));
vst1_f32(c1, vget_low_f32(*vout1_ptr));
vst1_f32(c2, vget_low_f32(*vout2_ptr));
vst1_f32(c3, vget_low_f32(*vout3_ptr));
c0 += 2;
(*vout0_ptr)[0] = (*vout0_ptr)[2];
c1 += 2;
(*vout1_ptr)[0] = (*vout1_ptr)[2];
c2 += 2;
(*vout2_ptr)[0] = (*vout2_ptr)[2];
c3 += 2;
(*vout3_ptr)[0] = (*vout3_ptr)[2];
nr -= 2;
}
if (nr != 0) {
vst1q_lane_f32(c0, *vout0_ptr, 0);
vst1q_lane_f32(c1, *vout1_ptr, 0);
vst1q_lane_f32(c2, *vout2_ptr, 0);
vst1q_lane_f32(c3, *vout3_ptr, 0);
}
}
| 23,664
| 38.840067
| 81
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm/4x8c2-xzp-neon.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <arm_neon.h>
#include <qnnpack/q8gemm.h>
void pytorch_q8gemm_xzp_ukernel_4x8c2__neon(
size_t mr,
size_t nr,
size_t k,
const uint8_t* restrict a,
size_t a_stride,
const int32_t* restrict a_sum,
const void* restrict w,
uint8_t* restrict c,
size_t c_stride,
const union pytorch_qnnp_q31_requantization_params
requantization_params[restrict static 1]) {
int32x4_t vacc0x0123 = vld1q_s32(w);
w = (const void*)((uintptr_t)w + 16);
int32x4_t vacc0x4567 = vld1q_s32(w);
w = (const void*)((uintptr_t)w + 16);
int32x4_t vacc1x0123 = vacc0x0123;
int32x4_t vacc1x4567 = vacc0x4567;
int32x4_t vacc2x0123 = vacc0x0123;
int32x4_t vacc2x4567 = vacc0x4567;
int32x4_t vacc3x0123 = vacc0x0123;
int32x4_t vacc3x4567 = vacc0x4567;
const uint8_t* a0 = a;
const uint8_t* a1 = a0;
const int32_t* a_sum0 = a_sum;
const int32_t* a_sum1 = a_sum0;
if (mr >= 2) {
a1 += a_stride;
a_sum1 += 1;
}
const uint8_t* a2 = a1;
const int32_t* a_sum2 = a_sum1;
if (mr > 2) {
a2 += a_stride;
a_sum2 += 1;
}
const uint8_t* a3 = a2;
const int32_t* a_sum3 = a_sum2;
if (mr == 4) {
a3 += a_stride;
a_sum3 += 1;
}
const int32x4_t va_sum0 = vld1q_dup_s32(a_sum0);
const int32x4_t va_sum1 = vld1q_dup_s32(a_sum1);
const int32x4_t va_sum2 = vld1q_dup_s32(a_sum2);
const int32x4_t va_sum3 = vld1q_dup_s32(a_sum3);
vacc0x0123 = vaddq_s32(vacc0x0123, va_sum0);
vacc0x4567 = vaddq_s32(vacc0x4567, va_sum0);
vacc1x0123 = vaddq_s32(vacc1x0123, va_sum1);
vacc1x4567 = vaddq_s32(vacc1x4567, va_sum1);
vacc2x0123 = vaddq_s32(vacc2x0123, va_sum2);
vacc2x4567 = vaddq_s32(vacc2x4567, va_sum2);
vacc3x0123 = vaddq_s32(vacc3x0123, va_sum3);
vacc3x4567 = vaddq_s32(vacc3x4567, va_sum3);
for (; k >= 8; k -= 8) {
uint8x8_t va0x01234567 = vld1_u8(a0);
a0 += 8;
uint8x8_t va1x01234567 = vld1_u8(a1);
a1 += 8;
uint8x8_t va2x01234567 = vld1_u8(a2);
a2 += 8;
uint8x8_t va3x01234567 = vld1_u8(a3);
a3 += 8;
/* k = 0, 1 */
const uint8x16_t vb01234567x01 = vld1q_u8(w);
w += 16;
vacc0x0123 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc0x0123),
vmull_u8(va0x01234567, vget_low_u8(vb01234567x01))));
vacc0x4567 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc0x4567),
vmull_u8(va0x01234567, vget_high_u8(vb01234567x01))));
vacc1x0123 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc1x0123),
vmull_u8(va1x01234567, vget_low_u8(vb01234567x01))));
vacc1x4567 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc1x4567),
vmull_u8(va1x01234567, vget_high_u8(vb01234567x01))));
vacc2x0123 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc2x0123),
vmull_u8(va2x01234567, vget_low_u8(vb01234567x01))));
vacc2x4567 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc2x4567),
vmull_u8(va2x01234567, vget_high_u8(vb01234567x01))));
vacc3x0123 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc3x0123),
vmull_u8(va3x01234567, vget_low_u8(vb01234567x01))));
vacc3x4567 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc3x4567),
vmull_u8(va3x01234567, vget_high_u8(vb01234567x01))));
/* k = 2, 3 */
va0x01234567 = vext_u8(va0x01234567, va0x01234567, 2);
va1x01234567 = vext_u8(va1x01234567, va1x01234567, 2);
va2x01234567 = vext_u8(va2x01234567, va2x01234567, 2);
va3x01234567 = vext_u8(va3x01234567, va3x01234567, 2);
const uint8x16_t vb01234567x23 = vld1q_u8(w);
w += 16;
vacc0x0123 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc0x0123),
vmull_u8(va0x01234567, vget_low_u8(vb01234567x23))));
vacc0x4567 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc0x4567),
vmull_u8(va0x01234567, vget_high_u8(vb01234567x23))));
vacc1x0123 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc1x0123),
vmull_u8(va1x01234567, vget_low_u8(vb01234567x23))));
vacc1x4567 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc1x4567),
vmull_u8(va1x01234567, vget_high_u8(vb01234567x23))));
vacc2x0123 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc2x0123),
vmull_u8(va2x01234567, vget_low_u8(vb01234567x23))));
vacc2x4567 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc2x4567),
vmull_u8(va2x01234567, vget_high_u8(vb01234567x23))));
vacc3x0123 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc3x0123),
vmull_u8(va3x01234567, vget_low_u8(vb01234567x23))));
vacc3x4567 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc3x4567),
vmull_u8(va3x01234567, vget_high_u8(vb01234567x23))));
/* k = 4, 5 */
va0x01234567 = vext_u8(va0x01234567, va0x01234567, 2);
va1x01234567 = vext_u8(va1x01234567, va1x01234567, 2);
va2x01234567 = vext_u8(va2x01234567, va2x01234567, 2);
va3x01234567 = vext_u8(va3x01234567, va3x01234567, 2);
const uint8x16_t vb01234567x45 = vld1q_u8(w);
w += 16;
vacc0x0123 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc0x0123),
vmull_u8(va0x01234567, vget_low_u8(vb01234567x45))));
vacc0x4567 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc0x4567),
vmull_u8(va0x01234567, vget_high_u8(vb01234567x45))));
vacc1x0123 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc1x0123),
vmull_u8(va1x01234567, vget_low_u8(vb01234567x45))));
vacc1x4567 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc1x4567),
vmull_u8(va1x01234567, vget_high_u8(vb01234567x45))));
vacc2x0123 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc2x0123),
vmull_u8(va2x01234567, vget_low_u8(vb01234567x45))));
vacc2x4567 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc2x4567),
vmull_u8(va2x01234567, vget_high_u8(vb01234567x45))));
vacc3x0123 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc3x0123),
vmull_u8(va3x01234567, vget_low_u8(vb01234567x45))));
vacc3x4567 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc3x4567),
vmull_u8(va3x01234567, vget_high_u8(vb01234567x45))));
/* k = 6, 7 */
va0x01234567 = vext_u8(va0x01234567, va0x01234567, 2);
va1x01234567 = vext_u8(va1x01234567, va1x01234567, 2);
va2x01234567 = vext_u8(va2x01234567, va2x01234567, 2);
va3x01234567 = vext_u8(va3x01234567, va3x01234567, 2);
const uint8x16_t vb01234567x67 = vld1q_u8(w);
w += 16;
vacc0x0123 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc0x0123),
vmull_u8(va0x01234567, vget_low_u8(vb01234567x67))));
vacc0x4567 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc0x4567),
vmull_u8(va0x01234567, vget_high_u8(vb01234567x67))));
vacc1x0123 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc1x0123),
vmull_u8(va1x01234567, vget_low_u8(vb01234567x67))));
vacc1x4567 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc1x4567),
vmull_u8(va1x01234567, vget_high_u8(vb01234567x67))));
vacc2x0123 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc2x0123),
vmull_u8(va2x01234567, vget_low_u8(vb01234567x67))));
vacc2x4567 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc2x4567),
vmull_u8(va2x01234567, vget_high_u8(vb01234567x67))));
vacc3x0123 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc3x0123),
vmull_u8(va3x01234567, vget_low_u8(vb01234567x67))));
vacc3x4567 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc3x4567),
vmull_u8(va3x01234567, vget_high_u8(vb01234567x67))));
}
/* for k < 8, reuse the packing scheme for the original xzp ukernel */
if (k & 4) {
/* k = 0, 1 */
const uint8x8_t va0x01010101 = vreinterpret_u8_u16(
vld1_dup_u16(__builtin_assume_aligned((const uint16_t*)a0, 1)));
a0 += 2;
const uint8x8_t va1x01010101 = vreinterpret_u8_u16(
vld1_dup_u16(__builtin_assume_aligned((const uint16_t*)a1, 1)));
a1 += 2;
const uint8x8_t va2x01010101 = vreinterpret_u8_u16(
vld1_dup_u16(__builtin_assume_aligned((const uint16_t*)a2, 1)));
a2 += 2;
const uint8x8_t va3x01010101 = vreinterpret_u8_u16(
vld1_dup_u16(__builtin_assume_aligned((const uint16_t*)a3, 1)));
a3 += 2;
const uint8x16_t vb01234567x01 = vld1q_u8(w);
w += 16;
vacc0x0123 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc0x0123),
vmull_u8(va0x01010101, vget_low_u8(vb01234567x01))));
vacc0x4567 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc0x4567),
vmull_u8(va0x01010101, vget_high_u8(vb01234567x01))));
vacc1x0123 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc1x0123),
vmull_u8(va1x01010101, vget_low_u8(vb01234567x01))));
vacc1x4567 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc1x4567),
vmull_u8(va1x01010101, vget_high_u8(vb01234567x01))));
vacc2x0123 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc2x0123),
vmull_u8(va2x01010101, vget_low_u8(vb01234567x01))));
vacc2x4567 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc2x4567),
vmull_u8(va2x01010101, vget_high_u8(vb01234567x01))));
vacc3x0123 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc3x0123),
vmull_u8(va3x01010101, vget_low_u8(vb01234567x01))));
vacc3x4567 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc3x4567),
vmull_u8(va3x01010101, vget_high_u8(vb01234567x01))));
/* k = 2, 3 */
const uint8x8_t va0x23232323 = vreinterpret_u8_u16(
vld1_dup_u16(__builtin_assume_aligned((const uint16_t*)a0, 1)));
a0 += 2;
const uint8x8_t va1x23232323 = vreinterpret_u8_u16(
vld1_dup_u16(__builtin_assume_aligned((const uint16_t*)a1, 1)));
a1 += 2;
const uint8x8_t va2x23232323 = vreinterpret_u8_u16(
vld1_dup_u16(__builtin_assume_aligned((const uint16_t*)a2, 1)));
a2 += 2;
const uint8x8_t va3x23232323 = vreinterpret_u8_u16(
vld1_dup_u16(__builtin_assume_aligned((const uint16_t*)a3, 1)));
a3 += 2;
const uint8x16_t vb01234567x23 = vld1q_u8(w);
w += 16;
vacc0x0123 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc0x0123),
vmull_u8(va0x23232323, vget_low_u8(vb01234567x23))));
vacc0x4567 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc0x4567),
vmull_u8(va0x23232323, vget_high_u8(vb01234567x23))));
vacc1x0123 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc1x0123),
vmull_u8(va1x23232323, vget_low_u8(vb01234567x23))));
vacc1x4567 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc1x4567),
vmull_u8(va1x23232323, vget_high_u8(vb01234567x23))));
vacc2x0123 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc2x0123),
vmull_u8(va2x23232323, vget_low_u8(vb01234567x23))));
vacc2x4567 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc2x4567),
vmull_u8(va2x23232323, vget_high_u8(vb01234567x23))));
vacc3x0123 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc3x0123),
vmull_u8(va3x23232323, vget_low_u8(vb01234567x23))));
vacc3x4567 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc3x4567),
vmull_u8(va3x23232323, vget_high_u8(vb01234567x23))));
}
if (k & 2) {
/* k = 0, 1 */
const uint8x8_t va0x01010101 = vreinterpret_u8_u16(
vld1_dup_u16(__builtin_assume_aligned((const uint16_t*)a0, 1)));
a0 += 2;
const uint8x8_t va1x01010101 = vreinterpret_u8_u16(
vld1_dup_u16(__builtin_assume_aligned((const uint16_t*)a1, 1)));
a1 += 2;
const uint8x8_t va2x01010101 = vreinterpret_u8_u16(
vld1_dup_u16(__builtin_assume_aligned((const uint16_t*)a2, 1)));
a2 += 2;
const uint8x8_t va3x01010101 = vreinterpret_u8_u16(
vld1_dup_u16(__builtin_assume_aligned((const uint16_t*)a3, 1)));
a3 += 2;
const uint8x16_t vb01234567x01 = vld1q_u8(w);
w += 16;
vacc0x0123 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc0x0123),
vmull_u8(va0x01010101, vget_low_u8(vb01234567x01))));
vacc0x4567 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc0x4567),
vmull_u8(va0x01010101, vget_high_u8(vb01234567x01))));
vacc1x0123 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc1x0123),
vmull_u8(va1x01010101, vget_low_u8(vb01234567x01))));
vacc1x4567 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc1x4567),
vmull_u8(va1x01010101, vget_high_u8(vb01234567x01))));
vacc2x0123 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc2x0123),
vmull_u8(va2x01010101, vget_low_u8(vb01234567x01))));
vacc2x4567 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc2x4567),
vmull_u8(va2x01010101, vget_high_u8(vb01234567x01))));
vacc3x0123 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc3x0123),
vmull_u8(va3x01010101, vget_low_u8(vb01234567x01))));
vacc3x4567 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc3x4567),
vmull_u8(va3x01010101, vget_high_u8(vb01234567x01))));
}
if (k & 1) {
const uint8x8_t va0x00000000 = vld1_dup_u8(a0);
const uint8x8_t va1x00000000 = vld1_dup_u8(a1);
const uint8x8_t va2x00000000 = vld1_dup_u8(a2);
const uint8x8_t va3x00000000 = vld1_dup_u8(a3);
const uint8x16_t vb01234567x0 = vld1q_u8(w);
vacc0x0123 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc0x0123),
vmull_u8(va0x00000000, vget_low_u8(vb01234567x0))));
vacc0x4567 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc0x4567),
vmull_u8(va0x00000000, vget_high_u8(vb01234567x0))));
vacc1x0123 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc1x0123),
vmull_u8(va1x00000000, vget_low_u8(vb01234567x0))));
vacc1x4567 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc1x4567),
vmull_u8(va1x00000000, vget_high_u8(vb01234567x0))));
vacc2x0123 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc2x0123),
vmull_u8(va2x00000000, vget_low_u8(vb01234567x0))));
vacc2x4567 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc2x4567),
vmull_u8(va2x00000000, vget_high_u8(vb01234567x0))));
vacc3x0123 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc3x0123),
vmull_u8(va3x00000000, vget_low_u8(vb01234567x0))));
vacc3x4567 = vreinterpretq_s32_u32(vpadalq_u16(
vreinterpretq_u32_s32(vacc3x4567),
vmull_u8(va3x00000000, vget_high_u8(vb01234567x0))));
}
const int32x4_t vmultiplier =
vld1q_dup_s32(&requantization_params->neon.multiplier);
vacc0x0123 = vqrdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqrdmulhq_s32(vacc0x4567, vmultiplier);
vacc1x0123 = vqrdmulhq_s32(vacc1x0123, vmultiplier);
vacc1x4567 = vqrdmulhq_s32(vacc1x4567, vmultiplier);
vacc2x0123 = vqrdmulhq_s32(vacc2x0123, vmultiplier);
vacc2x4567 = vqrdmulhq_s32(vacc2x4567, vmultiplier);
vacc3x0123 = vqrdmulhq_s32(vacc3x0123, vmultiplier);
vacc3x4567 = vqrdmulhq_s32(vacc3x4567, vmultiplier);
const int32x4_t vright_shift =
vld1q_dup_s32(&requantization_params->neon.right_shift);
const int32x4_t vzero_shift_mask =
vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0)));
vacc0x0123 =
vsraq_n_s32(vacc0x0123, vbicq_s32(vacc0x0123, vzero_shift_mask), 31);
vacc0x4567 =
vsraq_n_s32(vacc0x4567, vbicq_s32(vacc0x4567, vzero_shift_mask), 31);
vacc1x0123 =
vsraq_n_s32(vacc1x0123, vbicq_s32(vacc1x0123, vzero_shift_mask), 31);
vacc1x4567 =
vsraq_n_s32(vacc1x4567, vbicq_s32(vacc1x4567, vzero_shift_mask), 31);
vacc2x0123 =
vsraq_n_s32(vacc2x0123, vbicq_s32(vacc2x0123, vzero_shift_mask), 31);
vacc2x4567 =
vsraq_n_s32(vacc2x4567, vbicq_s32(vacc2x4567, vzero_shift_mask), 31);
vacc3x0123 =
vsraq_n_s32(vacc3x0123, vbicq_s32(vacc3x0123, vzero_shift_mask), 31);
vacc3x4567 =
vsraq_n_s32(vacc3x4567, vbicq_s32(vacc3x4567, vzero_shift_mask), 31);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_shift);
vacc1x0123 = vrshlq_s32(vacc1x0123, vright_shift);
vacc1x4567 = vrshlq_s32(vacc1x4567, vright_shift);
vacc2x0123 = vrshlq_s32(vacc2x0123, vright_shift);
vacc2x4567 = vrshlq_s32(vacc2x4567, vright_shift);
vacc3x0123 = vrshlq_s32(vacc3x0123, vright_shift);
vacc3x4567 = vrshlq_s32(vacc3x4567, vright_shift);
const int16x8_t vzero_point =
vld1q_dup_s16(&requantization_params->neon.zero_point);
#ifdef __aarch64__
const int16x8_t vacc0x01234567 = vqaddq_s16(
vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), vzero_point);
const int16x8_t vacc1x01234567 = vqaddq_s16(
vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), vzero_point);
const int16x8_t vacc2x01234567 = vqaddq_s16(
vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), vzero_point);
const int16x8_t vacc3x01234567 = vqaddq_s16(
vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), vzero_point);
uint8x16_t vout0x01234567_1x01234567 =
vqmovun_high_s16(vqmovun_s16(vacc0x01234567), vacc1x01234567);
uint8x16_t vout2x01234567_3x01234567 =
vqmovun_high_s16(vqmovun_s16(vacc2x01234567), vacc3x01234567);
#else
const int16x8_t vacc0x01234567 = vqaddq_s16(
vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)),
vzero_point);
const int16x8_t vacc1x01234567 = vqaddq_s16(
vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)),
vzero_point);
const int16x8_t vacc2x01234567 = vqaddq_s16(
vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)),
vzero_point);
const int16x8_t vacc3x01234567 = vqaddq_s16(
vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)),
vzero_point);
uint8x16_t vout0x01234567_1x01234567 =
vcombine_u8(vqmovun_s16(vacc0x01234567), vqmovun_s16(vacc1x01234567));
uint8x16_t vout2x01234567_3x01234567 =
vcombine_u8(vqmovun_s16(vacc2x01234567), vqmovun_s16(vacc3x01234567));
#endif
const uint8x16_t vmin = vld1q_dup_u8(&requantization_params->neon.min);
const uint8x16_t vmax = vld1q_dup_u8(&requantization_params->neon.max);
vout0x01234567_1x01234567 = vmaxq_u8(vout0x01234567_1x01234567, vmin);
vout2x01234567_3x01234567 = vmaxq_u8(vout2x01234567_3x01234567, vmin);
vout0x01234567_1x01234567 = vminq_u8(vout0x01234567_1x01234567, vmax);
vout2x01234567_3x01234567 = vminq_u8(vout2x01234567_3x01234567, vmax);
uint8_t* c0 = c;
uint8_t* c1 = c0;
if (mr >= 2) {
c1 += c_stride;
}
uint8_t* c2 = c1;
if (mr > 2) {
c2 += c_stride;
}
uint8_t* c3 = c2;
if (mr == 4) {
c3 += c_stride;
}
if (nr == 8) {
vst1_u8(c0, vget_low_u8(vout0x01234567_1x01234567));
vst1_u8(c1, vget_high_u8(vout0x01234567_1x01234567));
vst1_u8(c2, vget_low_u8(vout2x01234567_3x01234567));
vst1_u8(c3, vget_high_u8(vout2x01234567_3x01234567));
} else {
if (nr >= 4) {
vst1q_lane_u32(
__builtin_assume_aligned(c0, 1),
vreinterpretq_u32_u8(vout0x01234567_1x01234567),
0);
c0 += 4;
vst1q_lane_u32(
__builtin_assume_aligned(c1, 1),
vreinterpretq_u32_u8(vout0x01234567_1x01234567),
2);
c1 += 4;
vst1q_lane_u32(
__builtin_assume_aligned(c2, 1),
vreinterpretq_u32_u8(vout2x01234567_3x01234567),
0);
c2 += 4;
vst1q_lane_u32(
__builtin_assume_aligned(c3, 1),
vreinterpretq_u32_u8(vout2x01234567_3x01234567),
2);
c3 += 4;
vout0x01234567_1x01234567 =
vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
vout2x01234567_3x01234567 =
vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
nr -= 4;
}
if (nr >= 2) {
vst1q_lane_u16(
__builtin_assume_aligned(c0, 1),
vreinterpretq_u16_u8(vout0x01234567_1x01234567),
0);
c0 += 2;
vst1q_lane_u16(
__builtin_assume_aligned(c1, 1),
vreinterpretq_u16_u8(vout0x01234567_1x01234567),
4);
c1 += 2;
vst1q_lane_u16(
__builtin_assume_aligned(c2, 1),
vreinterpretq_u16_u8(vout2x01234567_3x01234567),
0);
c2 += 2;
vst1q_lane_u16(
__builtin_assume_aligned(c3, 1),
vreinterpretq_u16_u8(vout2x01234567_3x01234567),
4);
c3 += 2;
vout0x01234567_1x01234567 =
vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
vout2x01234567_3x01234567 =
vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
nr -= 2;
}
if (nr != 0) {
vst1q_lane_u8(c0, vout0x01234567_1x01234567, 0);
vst1q_lane_u8(c1, vout0x01234567_1x01234567, 8);
vst1q_lane_u8(c2, vout2x01234567_3x01234567, 0);
vst1q_lane_u8(c3, vout2x01234567_3x01234567, 8);
}
}
}
| 22,120
| 39.663603
| 76
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm/6x4-neon.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <arm_neon.h>
#include <qnnpack/q8gemm.h>
#include <requantization/runtime-neon.h>
/*
 * Q8 GEMM microkernel computing a 6x4 output tile of C = A * B for
 * quantized (uint8) matrices using NEON.
 *
 * mr   - number of valid rows in this tile (1..6); extra row pointers are
 *        aliased to the last valid row so loads stay in bounds.
 * nr   - number of valid columns in this tile (1..4); partial stores are
 *        used when nr < 4.
 * k    - reduction (inner) dimension length.
 * a    - input activation rows, a_stride bytes apart.
 * w    - packed weights: 4 int32 biases followed by interleaved uint8
 *        weight values in groups of 4 columns x 2 k-steps.
 * c    - output tile, rows c_stride bytes apart.
 * output_channel_index - offset into the per-channel kernel zero points
 *        and requantization scales.
 */
void pytorch_q8gemm_ukernel_6x4__neon(
    size_t mr,
    size_t nr,
    size_t k,
    const uint8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    uint8_t* restrict c,
    size_t c_stride,
    size_t output_channel_index,
    const union pytorch_qnnp_conv_quantization_params
        quantization_params[restrict static 1]) {
  /* Seed all six row accumulators with the packed bias at the head of w. */
  int32x4_t vacc0x0123 = vld1q_s32(w);
  w = (const void*)((uintptr_t)w + 16);
  int32x4_t vacc1x0123 = vacc0x0123;
  int32x4_t vacc2x0123 = vacc0x0123;
  int32x4_t vacc3x0123 = vacc0x0123;
  int32x4_t vacc4x0123 = vacc0x0123;
  int32x4_t vacc5x0123 = vacc0x0123;
  /* Row pointers; when mr < 6, out-of-range rows alias the previous row so
   * their (discarded) computation still reads valid memory. */
  const uint8_t* a0 = a;
  const uint8_t* a1 = (const uint8_t*)((uintptr_t)a0 + a_stride);
  if (mr < 2) {
    a1 = a0;
  }
  const uint8_t* a2 = (const uint8_t*)((uintptr_t)a1 + a_stride);
  if (mr <= 2) {
    a2 = a1;
  }
  const uint8_t* a3 = (const uint8_t*)((uintptr_t)a2 + a_stride);
  if (mr < 4) {
    a3 = a2;
  }
  const uint8_t* a4 = (const uint8_t*)((uintptr_t)a3 + a_stride);
  if (mr <= 4) {
    a4 = a3;
  };
  const uint8_t* a5 = (const uint8_t*)((uintptr_t)a4 + a_stride);
  if (mr != 6) {
    a5 = a4;
  }
  const uint8x8_t va_zero_point =
      vld1_dup_u8((const uint8_t*)&quantization_params->neon.input_zero_point);
  /* Per-output-channel kernel zero points for the 4 columns of this tile. */
  uint8x8_t vb_zero_point =
      vld1_u8((const uint8_t*)&quantization_params->neon.kernel_zero_points
          [output_channel_index]);
  // Since only lower 4 values are used in this kernel. We replicate lower 4
  // values in upper 4 values. Still we end up loading 8 values assuming
  // zero point array is always multiple of 8.
  vb_zero_point = vset_lane_u8(vget_lane_u8(vb_zero_point, 0), vb_zero_point, 4);
  vb_zero_point = vset_lane_u8(vget_lane_u8(vb_zero_point, 1), vb_zero_point, 5);
  vb_zero_point = vset_lane_u8(vget_lane_u8(vb_zero_point, 2), vb_zero_point, 6);
  vb_zero_point = vset_lane_u8(vget_lane_u8(vb_zero_point, 3), vb_zero_point, 7);
  /* Main loop: consume 8 k-steps per iteration. Activations are widened to
   * int16 with the input zero point subtracted; weights come in 8-byte
   * packets covering columns 0-3 for two consecutive k-steps. */
  for (; k >= 8; k -= 8) {
    const uint8x8_t va0 = vld1_u8(a0);
    a0 += 8;
    const int16x8_t vxa0 =
        vreinterpretq_s16_u16(sub_zero_point(va0, va_zero_point));
    const uint8x8_t va1 = vld1_u8(a1);
    a1 += 8;
    const int16x8_t vxa1 =
        vreinterpretq_s16_u16(sub_zero_point(va1, va_zero_point));
    const uint8x8_t va2 = vld1_u8(a2);
    a2 += 8;
    const int16x8_t vxa2 =
        vreinterpretq_s16_u16(sub_zero_point(va2, va_zero_point));
    const uint8x8_t va3 = vld1_u8(a3);
    a3 += 8;
    const int16x8_t vxa3 =
        vreinterpretq_s16_u16(sub_zero_point(va3, va_zero_point));
    const uint8x8_t va4 = vld1_u8(a4);
    a4 += 8;
    const int16x8_t vxa4 =
        vreinterpretq_s16_u16(sub_zero_point(va4, va_zero_point));
    const uint8x8_t va5 = vld1_u8(a5);
    a5 += 8;
    const int16x8_t vxa5 =
        vreinterpretq_s16_u16(sub_zero_point(va5, va_zero_point));
    /* k-steps 0 and 1: multiply-accumulate columns 0-3 for all six rows. */
    const uint8x8_t vb0123c01 = vld1_u8(w);
    w = (const void*)((uintptr_t)w + 8);
    const int16x8_t vxb0123c01 =
        vreinterpretq_s16_u16(vsubl_u8(vb0123c01, vb_zero_point));
    vacc0x0123 = vmlal_lane_s16(
        vacc0x0123, vget_low_s16(vxb0123c01), vget_low_s16(vxa0), 0);
    vacc1x0123 = vmlal_lane_s16(
        vacc1x0123, vget_low_s16(vxb0123c01), vget_low_s16(vxa1), 0);
    vacc2x0123 = vmlal_lane_s16(
        vacc2x0123, vget_low_s16(vxb0123c01), vget_low_s16(vxa2), 0);
    vacc3x0123 = vmlal_lane_s16(
        vacc3x0123, vget_low_s16(vxb0123c01), vget_low_s16(vxa3), 0);
    vacc4x0123 = vmlal_lane_s16(
        vacc4x0123, vget_low_s16(vxb0123c01), vget_low_s16(vxa4), 0);
    vacc5x0123 = vmlal_lane_s16(
        vacc5x0123, vget_low_s16(vxb0123c01), vget_low_s16(vxa5), 0);
    vacc0x0123 = vmlal_lane_s16(
        vacc0x0123, vget_high_s16(vxb0123c01), vget_low_s16(vxa0), 1);
    vacc1x0123 = vmlal_lane_s16(
        vacc1x0123, vget_high_s16(vxb0123c01), vget_low_s16(vxa1), 1);
    vacc2x0123 = vmlal_lane_s16(
        vacc2x0123, vget_high_s16(vxb0123c01), vget_low_s16(vxa2), 1);
    vacc3x0123 = vmlal_lane_s16(
        vacc3x0123, vget_high_s16(vxb0123c01), vget_low_s16(vxa3), 1);
    vacc4x0123 = vmlal_lane_s16(
        vacc4x0123, vget_high_s16(vxb0123c01), vget_low_s16(vxa4), 1);
    vacc5x0123 = vmlal_lane_s16(
        vacc5x0123, vget_high_s16(vxb0123c01), vget_low_s16(vxa5), 1);
    /* k-steps 2 and 3. */
    const uint8x8_t vb0123c23 = vld1_u8(w);
    w = (const void*)((uintptr_t)w + 8);
    const int16x8_t vxb0123c23 =
        vreinterpretq_s16_u16(vsubl_u8(vb0123c23, vb_zero_point));
    vacc0x0123 = vmlal_lane_s16(
        vacc0x0123, vget_low_s16(vxb0123c23), vget_low_s16(vxa0), 2);
    vacc1x0123 = vmlal_lane_s16(
        vacc1x0123, vget_low_s16(vxb0123c23), vget_low_s16(vxa1), 2);
    vacc2x0123 = vmlal_lane_s16(
        vacc2x0123, vget_low_s16(vxb0123c23), vget_low_s16(vxa2), 2);
    vacc3x0123 = vmlal_lane_s16(
        vacc3x0123, vget_low_s16(vxb0123c23), vget_low_s16(vxa3), 2);
    vacc4x0123 = vmlal_lane_s16(
        vacc4x0123, vget_low_s16(vxb0123c23), vget_low_s16(vxa4), 2);
    vacc5x0123 = vmlal_lane_s16(
        vacc5x0123, vget_low_s16(vxb0123c23), vget_low_s16(vxa5), 2);
    vacc0x0123 = vmlal_lane_s16(
        vacc0x0123, vget_high_s16(vxb0123c23), vget_low_s16(vxa0), 3);
    vacc1x0123 = vmlal_lane_s16(
        vacc1x0123, vget_high_s16(vxb0123c23), vget_low_s16(vxa1), 3);
    vacc2x0123 = vmlal_lane_s16(
        vacc2x0123, vget_high_s16(vxb0123c23), vget_low_s16(vxa2), 3);
    vacc3x0123 = vmlal_lane_s16(
        vacc3x0123, vget_high_s16(vxb0123c23), vget_low_s16(vxa3), 3);
    vacc4x0123 = vmlal_lane_s16(
        vacc4x0123, vget_high_s16(vxb0123c23), vget_low_s16(vxa4), 3);
    vacc5x0123 = vmlal_lane_s16(
        vacc5x0123, vget_high_s16(vxb0123c23), vget_low_s16(vxa5), 3);
    /* k-steps 4 and 5 (activation lanes come from the high half now). */
    const uint8x8_t vb0123c45 = vld1_u8(w);
    w = (const void*)((uintptr_t)w + 8);
    const int16x8_t vxb0123c45 =
        vreinterpretq_s16_u16(vsubl_u8(vb0123c45, vb_zero_point));
    vacc0x0123 = vmlal_lane_s16(
        vacc0x0123, vget_low_s16(vxb0123c45), vget_high_s16(vxa0), 0);
    vacc1x0123 = vmlal_lane_s16(
        vacc1x0123, vget_low_s16(vxb0123c45), vget_high_s16(vxa1), 0);
    vacc2x0123 = vmlal_lane_s16(
        vacc2x0123, vget_low_s16(vxb0123c45), vget_high_s16(vxa2), 0);
    vacc3x0123 = vmlal_lane_s16(
        vacc3x0123, vget_low_s16(vxb0123c45), vget_high_s16(vxa3), 0);
    vacc4x0123 = vmlal_lane_s16(
        vacc4x0123, vget_low_s16(vxb0123c45), vget_high_s16(vxa4), 0);
    vacc5x0123 = vmlal_lane_s16(
        vacc5x0123, vget_low_s16(vxb0123c45), vget_high_s16(vxa5), 0);
    vacc0x0123 = vmlal_lane_s16(
        vacc0x0123, vget_high_s16(vxb0123c45), vget_high_s16(vxa0), 1);
    vacc1x0123 = vmlal_lane_s16(
        vacc1x0123, vget_high_s16(vxb0123c45), vget_high_s16(vxa1), 1);
    vacc2x0123 = vmlal_lane_s16(
        vacc2x0123, vget_high_s16(vxb0123c45), vget_high_s16(vxa2), 1);
    vacc3x0123 = vmlal_lane_s16(
        vacc3x0123, vget_high_s16(vxb0123c45), vget_high_s16(vxa3), 1);
    vacc4x0123 = vmlal_lane_s16(
        vacc4x0123, vget_high_s16(vxb0123c45), vget_high_s16(vxa4), 1);
    vacc5x0123 = vmlal_lane_s16(
        vacc5x0123, vget_high_s16(vxb0123c45), vget_high_s16(vxa5), 1);
    /* k-steps 6 and 7. */
    const uint8x8_t vb0123c67 = vld1_u8(w);
    w = (const void*)((uintptr_t)w + 8);
    const int16x8_t vxb0123c67 =
        vreinterpretq_s16_u16(vsubl_u8(vb0123c67, vb_zero_point));
    vacc0x0123 = vmlal_lane_s16(
        vacc0x0123, vget_low_s16(vxb0123c67), vget_high_s16(vxa0), 2);
    vacc1x0123 = vmlal_lane_s16(
        vacc1x0123, vget_low_s16(vxb0123c67), vget_high_s16(vxa1), 2);
    vacc2x0123 = vmlal_lane_s16(
        vacc2x0123, vget_low_s16(vxb0123c67), vget_high_s16(vxa2), 2);
    vacc3x0123 = vmlal_lane_s16(
        vacc3x0123, vget_low_s16(vxb0123c67), vget_high_s16(vxa3), 2);
    vacc4x0123 = vmlal_lane_s16(
        vacc4x0123, vget_low_s16(vxb0123c67), vget_high_s16(vxa4), 2);
    vacc5x0123 = vmlal_lane_s16(
        vacc5x0123, vget_low_s16(vxb0123c67), vget_high_s16(vxa5), 2);
    vacc0x0123 = vmlal_lane_s16(
        vacc0x0123, vget_high_s16(vxb0123c67), vget_high_s16(vxa0), 3);
    vacc1x0123 = vmlal_lane_s16(
        vacc1x0123, vget_high_s16(vxb0123c67), vget_high_s16(vxa1), 3);
    vacc2x0123 = vmlal_lane_s16(
        vacc2x0123, vget_high_s16(vxb0123c67), vget_high_s16(vxa2), 3);
    vacc3x0123 = vmlal_lane_s16(
        vacc3x0123, vget_high_s16(vxb0123c67), vget_high_s16(vxa3), 3);
    vacc4x0123 = vmlal_lane_s16(
        vacc4x0123, vget_high_s16(vxb0123c67), vget_high_s16(vxa4), 3);
    vacc5x0123 = vmlal_lane_s16(
        vacc5x0123, vget_high_s16(vxb0123c67), vget_high_s16(vxa5), 3);
  }
  /* Remainder: 1..7 leftover k-steps. Activation rows are loaded 8 bytes
   * back and shifted so the valid bytes land in the low lanes; weights are
   * loaded 4 bytes (one k-step of 4 columns) at a time via a dup-load. */
  if (k != 0) {
    const size_t a_predecrement = 8 - k;
    const int64x1_t va_shift = vmov_n_s64(-8 * a_predecrement);
    const uint8x8_t va0 = vreinterpret_u8_u64(
        vshl_u64(vreinterpret_u64_u8(vld1_u8(a0 - a_predecrement)), va_shift));
    const int16x8_t vxa0 =
        vreinterpretq_s16_u16(sub_zero_point(va0, va_zero_point));
    const uint8x8_t va1 = vreinterpret_u8_u64(
        vshl_u64(vreinterpret_u64_u8(vld1_u8(a1 - a_predecrement)), va_shift));
    const int16x8_t vxa1 =
        vreinterpretq_s16_u16(sub_zero_point(va1, va_zero_point));
    const uint8x8_t va2 = vreinterpret_u8_u64(
        vshl_u64(vreinterpret_u64_u8(vld1_u8(a2 - a_predecrement)), va_shift));
    const int16x8_t vxa2 =
        vreinterpretq_s16_u16(sub_zero_point(va2, va_zero_point));
    const uint8x8_t va3 = vreinterpret_u8_u64(
        vshl_u64(vreinterpret_u64_u8(vld1_u8(a3 - a_predecrement)), va_shift));
    const int16x8_t vxa3 =
        vreinterpretq_s16_u16(sub_zero_point(va3, va_zero_point));
    const uint8x8_t va4 = vreinterpret_u8_u64(
        vshl_u64(vreinterpret_u64_u8(vld1_u8(a4 - a_predecrement)), va_shift));
    const int16x8_t vxa4 =
        vreinterpretq_s16_u16(sub_zero_point(va4, va_zero_point));
    const uint8x8_t va5 = vreinterpret_u8_u64(
        vshl_u64(vreinterpret_u64_u8(vld1_u8(a5 - a_predecrement)), va_shift));
    const int16x8_t vxa5 =
        vreinterpretq_s16_u16(sub_zero_point(va5, va_zero_point));
    /* Remainder k-step 0. */
    const uint8x8_t vb0123c0 = vreinterpret_u8_u32(vld1_dup_u32(w));
    w = (const void*)((uintptr_t)w + 4);
    const int16x8_t vxb0123c0 =
        vreinterpretq_s16_u16(vsubl_u8(vb0123c0, vb_zero_point));
    vacc0x0123 = vmlal_lane_s16(
        vacc0x0123, vget_low_s16(vxb0123c0), vget_low_s16(vxa0), 0);
    vacc1x0123 = vmlal_lane_s16(
        vacc1x0123, vget_low_s16(vxb0123c0), vget_low_s16(vxa1), 0);
    vacc2x0123 = vmlal_lane_s16(
        vacc2x0123, vget_low_s16(vxb0123c0), vget_low_s16(vxa2), 0);
    vacc3x0123 = vmlal_lane_s16(
        vacc3x0123, vget_low_s16(vxb0123c0), vget_low_s16(vxa3), 0);
    vacc4x0123 = vmlal_lane_s16(
        vacc4x0123, vget_low_s16(vxb0123c0), vget_low_s16(vxa4), 0);
    vacc5x0123 = vmlal_lane_s16(
        vacc5x0123, vget_low_s16(vxb0123c0), vget_low_s16(vxa5), 0);
    if (k >= 2) {
      /* Remainder k-step 1. */
      const uint8x8_t vb0123c1 = vreinterpret_u8_u32(vld1_dup_u32(w));
      w = (const void*)((uintptr_t)w + 4);
      const int16x8_t vxb0123c1 =
          vreinterpretq_s16_u16(vsubl_u8(vb0123c1, vb_zero_point));
      vacc0x0123 = vmlal_lane_s16(
          vacc0x0123, vget_low_s16(vxb0123c1), vget_low_s16(vxa0), 1);
      vacc1x0123 = vmlal_lane_s16(
          vacc1x0123, vget_low_s16(vxb0123c1), vget_low_s16(vxa1), 1);
      vacc2x0123 = vmlal_lane_s16(
          vacc2x0123, vget_low_s16(vxb0123c1), vget_low_s16(vxa2), 1);
      vacc3x0123 = vmlal_lane_s16(
          vacc3x0123, vget_low_s16(vxb0123c1), vget_low_s16(vxa3), 1);
      vacc4x0123 = vmlal_lane_s16(
          vacc4x0123, vget_low_s16(vxb0123c1), vget_low_s16(vxa4), 1);
      vacc5x0123 = vmlal_lane_s16(
          vacc5x0123, vget_low_s16(vxb0123c1), vget_low_s16(vxa5), 1);
      if (k > 2) {
        /* Remainder k-step 2. */
        const uint8x8_t vb0123c2 = vreinterpret_u8_u32(vld1_dup_u32(w));
        w = (const void*)((uintptr_t)w + 4);
        const int16x8_t vxb0123c2 =
            vreinterpretq_s16_u16(vsubl_u8(vb0123c2, vb_zero_point));
        vacc0x0123 = vmlal_lane_s16(
            vacc0x0123, vget_low_s16(vxb0123c2), vget_low_s16(vxa0), 2);
        vacc1x0123 = vmlal_lane_s16(
            vacc1x0123, vget_low_s16(vxb0123c2), vget_low_s16(vxa1), 2);
        vacc2x0123 = vmlal_lane_s16(
            vacc2x0123, vget_low_s16(vxb0123c2), vget_low_s16(vxa2), 2);
        vacc3x0123 = vmlal_lane_s16(
            vacc3x0123, vget_low_s16(vxb0123c2), vget_low_s16(vxa3), 2);
        vacc4x0123 = vmlal_lane_s16(
            vacc4x0123, vget_low_s16(vxb0123c2), vget_low_s16(vxa4), 2);
        vacc5x0123 = vmlal_lane_s16(
            vacc5x0123, vget_low_s16(vxb0123c2), vget_low_s16(vxa5), 2);
        if (k >= 4) {
          /* Remainder k-step 3. */
          const uint8x8_t vb0123c3 = vreinterpret_u8_u32(vld1_dup_u32(w));
          w = (const void*)((uintptr_t)w + 4);
          const int16x8_t vxb0123c3 =
              vreinterpretq_s16_u16(vsubl_u8(vb0123c3, vb_zero_point));
          vacc0x0123 = vmlal_lane_s16(
              vacc0x0123, vget_low_s16(vxb0123c3), vget_low_s16(vxa0), 3);
          vacc1x0123 = vmlal_lane_s16(
              vacc1x0123, vget_low_s16(vxb0123c3), vget_low_s16(vxa1), 3);
          vacc2x0123 = vmlal_lane_s16(
              vacc2x0123, vget_low_s16(vxb0123c3), vget_low_s16(vxa2), 3);
          vacc3x0123 = vmlal_lane_s16(
              vacc3x0123, vget_low_s16(vxb0123c3), vget_low_s16(vxa3), 3);
          vacc4x0123 = vmlal_lane_s16(
              vacc4x0123, vget_low_s16(vxb0123c3), vget_low_s16(vxa4), 3);
          vacc5x0123 = vmlal_lane_s16(
              vacc5x0123, vget_low_s16(vxb0123c3), vget_low_s16(vxa5), 3);
          if (k > 4) {
            /* Remainder k-step 4 (switch to high activation half). */
            const uint8x8_t vb0123c4 = vreinterpret_u8_u32(vld1_dup_u32(w));
            w = (const void*)((uintptr_t)w + 4);
            const int16x8_t vxb0123c4 =
                vreinterpretq_s16_u16(vsubl_u8(vb0123c4, vb_zero_point));
            vacc0x0123 = vmlal_lane_s16(
                vacc0x0123, vget_low_s16(vxb0123c4), vget_high_s16(vxa0), 0);
            vacc1x0123 = vmlal_lane_s16(
                vacc1x0123, vget_low_s16(vxb0123c4), vget_high_s16(vxa1), 0);
            vacc2x0123 = vmlal_lane_s16(
                vacc2x0123, vget_low_s16(vxb0123c4), vget_high_s16(vxa2), 0);
            vacc3x0123 = vmlal_lane_s16(
                vacc3x0123, vget_low_s16(vxb0123c4), vget_high_s16(vxa3), 0);
            vacc4x0123 = vmlal_lane_s16(
                vacc4x0123, vget_low_s16(vxb0123c4), vget_high_s16(vxa4), 0);
            vacc5x0123 = vmlal_lane_s16(
                vacc5x0123, vget_low_s16(vxb0123c4), vget_high_s16(vxa5), 0);
            if (k >= 6) {
              /* Remainder k-step 5. */
              const uint8x8_t vb0123c5 = vreinterpret_u8_u32(vld1_dup_u32(w));
              w = (const void*)((uintptr_t)w + 4);
              const int16x8_t vxb0123c5 =
                  vreinterpretq_s16_u16(vsubl_u8(vb0123c5, vb_zero_point));
              vacc0x0123 = vmlal_lane_s16(
                  vacc0x0123, vget_low_s16(vxb0123c5), vget_high_s16(vxa0), 1);
              vacc1x0123 = vmlal_lane_s16(
                  vacc1x0123, vget_low_s16(vxb0123c5), vget_high_s16(vxa1), 1);
              vacc2x0123 = vmlal_lane_s16(
                  vacc2x0123, vget_low_s16(vxb0123c5), vget_high_s16(vxa2), 1);
              vacc3x0123 = vmlal_lane_s16(
                  vacc3x0123, vget_low_s16(vxb0123c5), vget_high_s16(vxa3), 1);
              vacc4x0123 = vmlal_lane_s16(
                  vacc4x0123, vget_low_s16(vxb0123c5), vget_high_s16(vxa4), 1);
              vacc5x0123 = vmlal_lane_s16(
                  vacc5x0123, vget_low_s16(vxb0123c5), vget_high_s16(vxa5), 1);
              if (k > 6) {
                /* Remainder k-step 6 (last possible; w no longer advanced). */
                const uint8x8_t vb0123c6 = vreinterpret_u8_u32(vld1_dup_u32(w));
                const int16x8_t vxb0123c6 =
                    vreinterpretq_s16_u16(vsubl_u8(vb0123c6, vb_zero_point));
                vacc0x0123 = vmlal_lane_s16(
                    vacc0x0123,
                    vget_low_s16(vxb0123c6),
                    vget_high_s16(vxa0),
                    2);
                vacc1x0123 = vmlal_lane_s16(
                    vacc1x0123,
                    vget_low_s16(vxb0123c6),
                    vget_high_s16(vxa1),
                    2);
                vacc2x0123 = vmlal_lane_s16(
                    vacc2x0123,
                    vget_low_s16(vxb0123c6),
                    vget_high_s16(vxa2),
                    2);
                vacc3x0123 = vmlal_lane_s16(
                    vacc3x0123,
                    vget_low_s16(vxb0123c6),
                    vget_high_s16(vxa3),
                    2);
                vacc4x0123 = vmlal_lane_s16(
                    vacc4x0123,
                    vget_low_s16(vxb0123c6),
                    vget_high_s16(vxa4),
                    2);
                vacc5x0123 = vmlal_lane_s16(
                    vacc5x0123,
                    vget_low_s16(vxb0123c6),
                    vget_high_s16(vxa5),
                    2);
              }
            }
          }
        }
      }
    }
  }
  /* Requantization: scale int32 accumulators by the per-channel float
   * requantization scales for these 4 output channels. */
  const float32x4_t requantization_scale_v =
      vld1q_f32(
          &quantization_params->neon.requantization_scales[
              output_channel_index]);
  const float32x4_t vacc0x0123_f =
      vmulq_f32(vcvtq_f32_s32(vacc0x0123), requantization_scale_v);
  const float32x4_t vacc1x0123_f =
      vmulq_f32(vcvtq_f32_s32(vacc1x0123), requantization_scale_v);
  const float32x4_t vacc2x0123_f =
      vmulq_f32(vcvtq_f32_s32(vacc2x0123), requantization_scale_v);
  const float32x4_t vacc3x0123_f =
      vmulq_f32(vcvtq_f32_s32(vacc3x0123), requantization_scale_v);
  const float32x4_t vacc4x0123_f =
      vmulq_f32(vcvtq_f32_s32(vacc4x0123), requantization_scale_v);
  const float32x4_t vacc5x0123_f =
      vmulq_f32(vcvtq_f32_s32(vacc5x0123), requantization_scale_v);
#ifdef __aarch64__
  /* AArch64: round-to-nearest convert, saturating narrow to s16, add output
   * zero point, then saturating narrow to u8. */
  const int16x8_t voutput_zero_point =
      vld1q_dup_s16(&quantization_params->neon.output_zero_point);
  vacc0x0123 = vcvtnq_s32_f32(vacc0x0123_f);
  vacc1x0123 = vcvtnq_s32_f32(vacc1x0123_f);
  vacc2x0123 = vcvtnq_s32_f32(vacc2x0123_f);
  vacc3x0123 = vcvtnq_s32_f32(vacc3x0123_f);
  vacc4x0123 = vcvtnq_s32_f32(vacc4x0123_f);
  vacc5x0123 = vcvtnq_s32_f32(vacc5x0123_f);
  const int16x8_t vacc01x0123 = vqaddq_s16(
      vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc1x0123), voutput_zero_point);
  const int16x8_t vacc23x0123 = vqaddq_s16(
      vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc3x0123), voutput_zero_point);
  const int16x8_t vacc45x0123 = vqaddq_s16(
      vqmovn_high_s32(vqmovn_s32(vacc4x0123), vacc5x0123), voutput_zero_point);
  uint8x16_t vout0123x0123 =
      vqmovun_high_s16(vqmovun_s16(vacc01x0123), vacc23x0123);
  uint8x8_t vout45x0123 = vqmovun_s16(vacc45x0123);
  const uint8x16_t voutput_min =
      vld1q_dup_u8(&quantization_params->neon.output_min);
  const uint8x16_t voutput_max =
      vld1q_dup_u8(&quantization_params->neon.output_max);
  vout0123x0123 = vmaxq_u8(vout0123x0123, voutput_min);
  vout45x0123 = vmax_u8(vout45x0123, vget_low_u8(voutput_min));
  vout0123x0123 = vminq_u8(vout0123x0123, voutput_max);
  vout45x0123 = vmin_u8(vout45x0123, vget_low_u8(voutput_max));
#else
  /* AArch32: no vcvtnq; clamp in float then use the magic-number trick
   * (add vfmagic, reinterpret as int, subtract vimagic) to round-convert. */
  const float32x4_t vfmin = vdupq_n_f32(quantization_params->neon.vfmin);
  const float32x4_t vfmax = vdupq_n_f32(quantization_params->neon.vfmax);
  const float32x4_t vfmagic = vdupq_n_f32(quantization_params->neon.vfmagic);
  const int32x4_t vimagic = vdupq_n_s32(quantization_params->neon.vimagic);
  const float32x4_t vacc0x0123_f_clamped =
      vminq_f32(vmaxq_f32(vacc0x0123_f, vfmin), vfmax);
  const float32x4_t vacc1x0123_f_clamped =
      vminq_f32(vmaxq_f32(vacc1x0123_f, vfmin), vfmax);
  const float32x4_t vacc2x0123_f_clamped =
      vminq_f32(vmaxq_f32(vacc2x0123_f, vfmin), vfmax);
  const float32x4_t vacc3x0123_f_clamped =
      vminq_f32(vmaxq_f32(vacc3x0123_f, vfmin), vfmax);
  const float32x4_t vacc4x0123_f_clamped =
      vminq_f32(vmaxq_f32(vacc4x0123_f, vfmin), vfmax);
  const float32x4_t vacc5x0123_f_clamped =
      vminq_f32(vmaxq_f32(vacc5x0123_f, vfmin), vfmax);
  vacc0x0123 = vsubq_s32(
      vreinterpretq_s32_f32(vaddq_f32(vacc0x0123_f_clamped, vfmagic)), vimagic);
  vacc1x0123 = vsubq_s32(
      vreinterpretq_s32_f32(vaddq_f32(vacc1x0123_f_clamped, vfmagic)), vimagic);
  vacc2x0123 = vsubq_s32(
      vreinterpretq_s32_f32(vaddq_f32(vacc2x0123_f_clamped, vfmagic)), vimagic);
  vacc3x0123 = vsubq_s32(
      vreinterpretq_s32_f32(vaddq_f32(vacc3x0123_f_clamped, vfmagic)), vimagic);
  vacc4x0123 = vsubq_s32(
      vreinterpretq_s32_f32(vaddq_f32(vacc4x0123_f_clamped, vfmagic)), vimagic);
  vacc5x0123 = vsubq_s32(
      vreinterpretq_s32_f32(vaddq_f32(vacc5x0123_f_clamped, vfmagic)), vimagic);
  const int16x8_t vacc01x0123 =
      vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc1x0123));
  const int16x8_t vacc23x0123 =
      vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc3x0123));
  const int16x8_t vacc45x0123 =
      vcombine_s16(vqmovn_s32(vacc4x0123), vqmovn_s32(vacc5x0123));
  uint8x16_t vout0123x0123 =
      vcombine_u8(vqmovun_s16(vacc01x0123), vqmovun_s16(vacc23x0123));
  uint8x8_t vout45x0123 = vqmovun_s16(vacc45x0123);
#endif
  /* Output row pointers mirror the aliasing scheme used for the inputs, so
   * duplicate rows overwrite the same memory harmlessly. */
  uint8_t* c0 = c;
  uint8_t* c1 = (uint8_t*)((uintptr_t)c0 + c_stride);
  if (mr < 2) {
    c1 = c0;
  }
  uint8_t* c2 = (uint8_t*)((uintptr_t)c1 + c_stride);
  if (mr <= 2) {
    c2 = c1;
  }
  uint8_t* c3 = (uint8_t*)((uintptr_t)c2 + c_stride);
  if (mr < 4) {
    c3 = c2;
  }
  uint8_t* c4 = (uint8_t*)((uintptr_t)c3 + c_stride);
  if (mr <= 4) {
    c4 = c3;
  }
  uint8_t* c5 = (uint8_t*)((uintptr_t)c4 + c_stride);
  if (mr != 6) {
    c5 = c4;
  }
  if (nr == 4) {
    /* Full tile: store 4 bytes per row via unaligned 32-bit lane stores. */
    vst1q_lane_u32(
        __builtin_assume_aligned(c0, 1),
        vreinterpretq_u32_u8(vout0123x0123),
        0);
    vst1q_lane_u32(
        __builtin_assume_aligned(c1, 1),
        vreinterpretq_u32_u8(vout0123x0123),
        1);
    vst1q_lane_u32(
        __builtin_assume_aligned(c2, 1),
        vreinterpretq_u32_u8(vout0123x0123),
        2);
    vst1q_lane_u32(
        __builtin_assume_aligned(c3, 1),
        vreinterpretq_u32_u8(vout0123x0123),
        3);
    vst1_lane_u32(
        __builtin_assume_aligned(c4, 1), vreinterpret_u32_u8(vout45x0123), 0);
    vst1_lane_u32(
        __builtin_assume_aligned(c5, 1), vreinterpret_u32_u8(vout45x0123), 1);
  } else {
    /* Partial tile: store 2 bytes, rotate the vector, then 1 byte. */
    if (nr >= 2) {
      vst1q_lane_u16(
          __builtin_assume_aligned(c0, 1),
          vreinterpretq_u16_u8(vout0123x0123),
          0);
      c0 += 2;
      vst1q_lane_u16(
          __builtin_assume_aligned(c1, 1),
          vreinterpretq_u16_u8(vout0123x0123),
          2);
      c1 += 2;
      vst1q_lane_u16(
          __builtin_assume_aligned(c2, 1),
          vreinterpretq_u16_u8(vout0123x0123),
          4);
      c2 += 2;
      vst1q_lane_u16(
          __builtin_assume_aligned(c3, 1),
          vreinterpretq_u16_u8(vout0123x0123),
          6);
      c3 += 2;
      vst1_lane_u16(
          __builtin_assume_aligned(c4, 1), vreinterpret_u16_u8(vout45x0123), 0);
      c4 += 2;
      vst1_lane_u16(
          __builtin_assume_aligned(c5, 1), vreinterpret_u16_u8(vout45x0123), 2);
      c5 += 2;
      vout0123x0123 = vextq_u8(vout0123x0123, vout0123x0123, 2);
      vout45x0123 = vext_u8(vout45x0123, vout45x0123, 2);
      nr -= 2;
    }
    if (nr != 0) {
      vst1q_lane_u8(__builtin_assume_aligned(c0, 1), vout0123x0123, 0);
      vst1q_lane_u8(__builtin_assume_aligned(c1, 1), vout0123x0123, 4);
      vst1q_lane_u8(__builtin_assume_aligned(c2, 1), vout0123x0123, 8);
      vst1q_lane_u8(__builtin_assume_aligned(c3, 1), vout0123x0123, 12);
      vst1_lane_u8(__builtin_assume_aligned(c4, 1), vout45x0123, 0);
      vst1_lane_u8(__builtin_assume_aligned(c5, 1), vout45x0123, 4);
    }
  }
}
| 23,968
| 40.468858
| 81
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm_sparse/8x4-packA-sse2.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <immintrin.h>
#include <qnnpack/q8gemm_sparse.h>
#include <requantization/runtime-sse2.h>
#include "8x4c1x4-packed-sse2.h"
// This is a super slow kernel in that it does not use intrinsics to
// tranpose. Since this is for x86 we are not optimizing it.
// For ARM this will be optimized.
// Packs up to 8 rows (mr) of the activation matrix A into the blocked layout
// consumed by the sparse GEMM kernels.
//
// Layout: A is split along K into COL_BLOCK_SIZE-wide column blocks. Each
// block is stored transposed as COL_BLOCK_SIZE k-rows of 8 m-values, and
// consecutive blocks for the same group of 8 rows are adjacent in memory
// (PACKED_A_BLOCK_SIZE bytes apart). When mr < 8 or K is not a multiple of
// COL_BLOCK_SIZE, the untouched slots keep whatever bytes were already in
// a_packed; that is safe because the GEMM kernel never writes results for
// those rows, and the packed weights are zero-padded along K so stale k
// values multiply to zero.
//
// This scalar transpose is deliberately simple: on x86 it is not a hot path.
void pytorch_q8gemm_sparse_packA_ukernel_8x4__sse2(
    const size_t mr,
    const size_t K,
    const uint8_t* a,
    const size_t a_stride,
    uint8_t* a_packed) {
  const uint32_t total_blocks = (K + COL_BLOCK_SIZE - 1) / COL_BLOCK_SIZE;
  const uint32_t last_block = total_blocks - 1;

  // All column blocks except the last are guaranteed full.
  for (uint32_t blk = 0; blk < last_block; blk++) {
    uint8_t* dst = a_packed + blk * PACKED_A_BLOCK_SIZE;
    const uint8_t* src = a + blk * COL_BLOCK_SIZE;
    for (uint32_t kk = 0; kk < COL_BLOCK_SIZE; kk++) {
      for (uint32_t m = 0; m < mr; m++) {
        // Transpose: packed index is (k within block) * 8 + row.
        dst[kk * 8 + m] = src[m * a_stride + kk];
      }
    }
  }

  // Last block: only the remaining K - last_block * COL_BLOCK_SIZE columns
  // are valid source data.
  const uint32_t tail_k = (uint32_t)(K - (size_t)last_block * COL_BLOCK_SIZE);
  uint8_t* dst = a_packed + last_block * PACKED_A_BLOCK_SIZE;
  const uint8_t* src = a + last_block * COL_BLOCK_SIZE;
  for (uint32_t kk = 0; kk < tail_k; kk++) {
    for (uint32_t m = 0; m < mr; m++) {
      dst[kk * 8 + m] = src[m * a_stride + kk];
    }
  }
}
| 2,929
| 38.066667
| 89
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm_sparse/8x4c1x4-dq-packedA-sse2.c
|
// Instantiate the shared 8x4c1x4 dequantizing sparse GEMM kernel template
// three times from "8x4c1x4-dq-packedA-sse2.h". Each instantiation sets
// KERNEL_NAME (the emitted function's name) and W_INDEX_DTYPE (the integer
// type of the sparse weight row/column indices: 32-, 16-, or 8-bit), then
// undefines both so the next instantiation starts clean.

// 32-bit index variant.
#define KERNEL_NAME pytorch_q8gemm_dq_sparse_1x4_ukernel_8x4_packedA_w32__sse2
#define W_INDEX_DTYPE uint32_t
#include "8x4c1x4-dq-packedA-sse2.h"
#undef KERNEL_NAME
#undef W_INDEX_DTYPE
// 16-bit index variant.
#define KERNEL_NAME pytorch_q8gemm_dq_sparse_1x4_ukernel_8x4_packedA_w16__sse2
#define W_INDEX_DTYPE uint16_t
#include "8x4c1x4-dq-packedA-sse2.h"
#undef KERNEL_NAME
#undef W_INDEX_DTYPE
// 8-bit index variant.
#define KERNEL_NAME pytorch_q8gemm_dq_sparse_1x4_ukernel_8x4_packedA_w8__sse2
#define W_INDEX_DTYPE uint8_t
#include "8x4c1x4-dq-packedA-sse2.h"
#undef KERNEL_NAME
#undef W_INDEX_DTYPE
| 561
| 30.222222
| 78
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8vadd/neon.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <arm_neon.h>
#include <qnnpack/common.h>
#include <qnnpack/q8vadd.h>
void pytorch_q8vadd_ukernel__neon(
size_t n,
const uint8_t* a,
const uint8_t* b,
uint8_t* y,
const union pytorch_qnnp_add_quantization_params
quantization_params[restrict static 1]) {
const uint8x8_t va_zero_point =
vld1_dup_u8(&quantization_params->neon.a_zero_point);
const uint8x8_t vb_zero_point =
vld1_dup_u8(&quantization_params->neon.b_zero_point);
const int16x8_t vy_zero_point =
vld1q_dup_s16(&quantization_params->neon.y_zero_point);
const int32x4_t va_multiplier =
vld1q_dup_s32(&quantization_params->neon.a_multiplier);
const int32x4_t vb_multiplier =
vld1q_dup_s32(&quantization_params->neon.b_multiplier);
const int32x4_t vright_shift =
vld1q_dup_s32(&quantization_params->neon.right_shift);
const int32x4_t vzero_shift_mask =
vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0)));
const uint8x16_t vy_max = vld1q_dup_u8(&quantization_params->neon.y_max);
const uint8x16_t vy_min = vld1q_dup_u8(&quantization_params->neon.y_min);
if
PYTORCH_QNNP_LIKELY(n >= 8) {
#ifdef __aarch64__
for (; n >= 32; n -= 32) {
const uint8x16_t va01 = vld1q_u8(a);
a += 16;
const uint8x16_t vb01 = vld1q_u8(b);
b += 16;
const uint8x16_t va23 = vld1q_u8(a);
a += 16;
const uint8x16_t vb23 = vld1q_u8(b);
b += 16;
/* Subtract zero point */
const int16x8_t vxa0 =
vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(va01), va_zero_point));
const int16x8_t vxb0 =
vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(vb01), vb_zero_point));
const int16x8_t vxa1 =
vreinterpretq_s16_u16(vsubl_u8(vget_high_u8(va01), va_zero_point));
const int16x8_t vxb1 =
vreinterpretq_s16_u16(vsubl_u8(vget_high_u8(vb01), vb_zero_point));
const int16x8_t vxa2 =
vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(va23), va_zero_point));
const int16x8_t vxb2 =
vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(vb23), vb_zero_point));
const int16x8_t vxa3 =
vreinterpretq_s16_u16(vsubl_u8(vget_high_u8(va23), va_zero_point));
const int16x8_t vxb3 =
vreinterpretq_s16_u16(vsubl_u8(vget_high_u8(vb23), vb_zero_point));
/* Multiply by factors and accumulate products */
int32x4_t vacc0_lo =
vmulq_s32(vmovl_s16(vget_low_s16(vxa0)), va_multiplier);
int32x4_t vacc1_lo =
vmulq_s32(vmovl_s16(vget_low_s16(vxa1)), va_multiplier);
int32x4_t vacc2_lo =
vmulq_s32(vmovl_s16(vget_low_s16(vxa2)), va_multiplier);
int32x4_t vacc3_lo =
vmulq_s32(vmovl_s16(vget_low_s16(vxa3)), va_multiplier);
int32x4_t vacc0_hi = vmulq_s32(vmovl_high_s16(vxa0), va_multiplier);
int32x4_t vacc1_hi = vmulq_s32(vmovl_high_s16(vxa1), va_multiplier);
int32x4_t vacc2_hi = vmulq_s32(vmovl_high_s16(vxa2), va_multiplier);
int32x4_t vacc3_hi = vmulq_s32(vmovl_high_s16(vxa3), va_multiplier);
vacc0_lo =
vmlaq_s32(vacc0_lo, vmovl_s16(vget_low_s16(vxb0)), vb_multiplier);
vacc1_lo =
vmlaq_s32(vacc1_lo, vmovl_s16(vget_low_s16(vxb1)), vb_multiplier);
vacc2_lo =
vmlaq_s32(vacc2_lo, vmovl_s16(vget_low_s16(vxb2)), vb_multiplier);
vacc3_lo =
vmlaq_s32(vacc3_lo, vmovl_s16(vget_low_s16(vxb3)), vb_multiplier);
vacc0_hi = vmlaq_s32(vacc0_hi, vmovl_high_s16(vxb0), vb_multiplier);
vacc1_hi = vmlaq_s32(vacc1_hi, vmovl_high_s16(vxb1), vb_multiplier);
vacc2_hi = vmlaq_s32(vacc2_hi, vmovl_high_s16(vxb2), vb_multiplier);
vacc3_hi = vmlaq_s32(vacc3_hi, vmovl_high_s16(vxb3), vb_multiplier);
/* Shift right and round */
vacc0_lo =
vsraq_n_s32(vacc0_lo, vbicq_s32(vacc0_lo, vzero_shift_mask), 31);
vacc1_lo =
vsraq_n_s32(vacc1_lo, vbicq_s32(vacc1_lo, vzero_shift_mask), 31);
vacc2_lo =
vsraq_n_s32(vacc2_lo, vbicq_s32(vacc2_lo, vzero_shift_mask), 31);
vacc3_lo =
vsraq_n_s32(vacc3_lo, vbicq_s32(vacc3_lo, vzero_shift_mask), 31);
vacc0_hi =
vsraq_n_s32(vacc0_hi, vbicq_s32(vacc0_hi, vzero_shift_mask), 31);
vacc1_hi =
vsraq_n_s32(vacc1_hi, vbicq_s32(vacc1_hi, vzero_shift_mask), 31);
vacc2_hi =
vsraq_n_s32(vacc2_hi, vbicq_s32(vacc2_hi, vzero_shift_mask), 31);
vacc3_hi =
vsraq_n_s32(vacc3_hi, vbicq_s32(vacc3_hi, vzero_shift_mask), 31);
vacc0_lo = vrshlq_s32(vacc0_lo, vright_shift);
vacc1_lo = vrshlq_s32(vacc1_lo, vright_shift);
vacc2_lo = vrshlq_s32(vacc2_lo, vright_shift);
vacc3_lo = vrshlq_s32(vacc3_lo, vright_shift);
vacc0_hi = vrshlq_s32(vacc0_hi, vright_shift);
vacc1_hi = vrshlq_s32(vacc1_hi, vright_shift);
vacc2_hi = vrshlq_s32(vacc2_hi, vright_shift);
vacc3_hi = vrshlq_s32(vacc3_hi, vright_shift);
/* Pack, saturate, and add output zero point */
const int16x8_t vacc0 = vqaddq_s16(
vqmovn_high_s32(vqmovn_s32(vacc0_lo), vacc0_hi), vy_zero_point);
const int16x8_t vacc1 = vqaddq_s16(
vqmovn_high_s32(vqmovn_s32(vacc1_lo), vacc1_hi), vy_zero_point);
const int16x8_t vacc2 = vqaddq_s16(
vqmovn_high_s32(vqmovn_s32(vacc2_lo), vacc2_hi), vy_zero_point);
const int16x8_t vacc3 = vqaddq_s16(
vqmovn_high_s32(vqmovn_s32(vacc3_lo), vacc3_hi), vy_zero_point);
uint8x16_t vy01 = vqmovun_high_s16(vqmovun_s16(vacc0), vacc1);
uint8x16_t vy23 = vqmovun_high_s16(vqmovun_s16(vacc2), vacc3);
vy01 = vmaxq_u8(vy01, vy_min);
vy23 = vmaxq_u8(vy23, vy_min);
vy01 = vminq_u8(vy01, vy_max);
vy23 = vminq_u8(vy23, vy_max);
vst1q_u8(y, vy01);
y += 16;
vst1q_u8(y, vy23);
y += 16;
}
#else
for (; n >= 16; n -= 16) {
const uint8x16_t va01 = vld1q_u8(a);
a += 16;
const uint8x16_t vb01 = vld1q_u8(b);
b += 16;
/* Subtract zero point */
const int16x8_t vxa0 =
vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(va01), va_zero_point));
const int16x8_t vxb0 =
vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(vb01), vb_zero_point));
const int16x8_t vxa1 =
vreinterpretq_s16_u16(vsubl_u8(vget_high_u8(va01), va_zero_point));
const int16x8_t vxb1 =
vreinterpretq_s16_u16(vsubl_u8(vget_high_u8(vb01), vb_zero_point));
/* Multiply by factors and accumulate products */
int32x4_t vacc0_lo =
vmulq_s32(vmovl_s16(vget_low_s16(vxa0)), va_multiplier);
int32x4_t vacc1_lo =
vmulq_s32(vmovl_s16(vget_low_s16(vxa1)), va_multiplier);
int32x4_t vacc0_hi =
vmulq_s32(vmovl_s16(vget_high_s16(vxa0)), va_multiplier);
int32x4_t vacc1_hi =
vmulq_s32(vmovl_s16(vget_high_s16(vxa1)), va_multiplier);
__builtin_prefetch(a + 640);
__builtin_prefetch(b + 640);
vacc0_lo =
vmlaq_s32(vacc0_lo, vmovl_s16(vget_low_s16(vxb0)), vb_multiplier);
vacc1_lo =
vmlaq_s32(vacc1_lo, vmovl_s16(vget_low_s16(vxb1)), vb_multiplier);
vacc0_hi =
vmlaq_s32(vacc0_hi, vmovl_s16(vget_high_s16(vxb0)), vb_multiplier);
vacc1_hi =
vmlaq_s32(vacc1_hi, vmovl_s16(vget_high_s16(vxb1)), vb_multiplier);
/* Shift right and round */
vacc0_lo =
vsraq_n_s32(vacc0_lo, vbicq_s32(vacc0_lo, vzero_shift_mask), 31);
vacc1_lo =
vsraq_n_s32(vacc1_lo, vbicq_s32(vacc1_lo, vzero_shift_mask), 31);
vacc0_hi =
vsraq_n_s32(vacc0_hi, vbicq_s32(vacc0_hi, vzero_shift_mask), 31);
vacc1_hi =
vsraq_n_s32(vacc1_hi, vbicq_s32(vacc1_hi, vzero_shift_mask), 31);
vacc0_lo = vrshlq_s32(vacc0_lo, vright_shift);
vacc1_lo = vrshlq_s32(vacc1_lo, vright_shift);
vacc0_hi = vrshlq_s32(vacc0_hi, vright_shift);
vacc1_hi = vrshlq_s32(vacc1_hi, vright_shift);
/* Pack, saturate, and add output zero point */
const int16x8_t vacc0 = vqaddq_s16(
vcombine_s16(vqmovn_s32(vacc0_lo), vqmovn_s32(vacc0_hi)),
vy_zero_point);
const int16x8_t vacc1 = vqaddq_s16(
vcombine_s16(vqmovn_s32(vacc1_lo), vqmovn_s32(vacc1_hi)),
vy_zero_point);
uint8x16_t vy01 = vcombine_u8(vqmovun_s16(vacc0), vqmovun_s16(vacc1));
vy01 = vmaxq_u8(vy01, vy_min);
vy01 = vminq_u8(vy01, vy_max);
vst1q_u8(y, vy01);
y += 16;
}
#endif
for (; n >= 8; n -= 8) {
const uint8x8_t va = vld1_u8(a);
a += 8;
const uint8x8_t vb = vld1_u8(b);
b += 8;
/* Subtract zero point */
const int16x8_t vxa =
vreinterpretq_s16_u16(vsubl_u8(va, va_zero_point));
const int16x8_t vxb =
vreinterpretq_s16_u16(vsubl_u8(vb, vb_zero_point));
/* Multiply by factors and accumulate products */
int32x4_t vacc_lo =
vmulq_s32(vmovl_s16(vget_low_s16(vxa)), va_multiplier);
#ifdef __aarch64__
int32x4_t vacc_hi = vmulq_s32(vmovl_high_s16(vxa), va_multiplier);
#else
int32x4_t vacc_hi =
vmulq_s32(vmovl_s16(vget_high_s16(vxa)), va_multiplier);
#endif
vacc_lo =
vmlaq_s32(vacc_lo, vmovl_s16(vget_low_s16(vxb)), vb_multiplier);
#ifdef __aarch64__
vacc_hi = vmlaq_s32(vacc_hi, vmovl_high_s16(vxb), vb_multiplier);
#else
vacc_hi =
vmlaq_s32(vacc_hi, vmovl_s16(vget_high_s16(vxb)), vb_multiplier);
#endif
/* Shift right and round */
vacc_lo =
vsraq_n_s32(vacc_lo, vbicq_s32(vacc_lo, vzero_shift_mask), 31);
vacc_hi =
vsraq_n_s32(vacc_hi, vbicq_s32(vacc_hi, vzero_shift_mask), 31);
vacc_lo = vrshlq_s32(vacc_lo, vright_shift);
vacc_hi = vrshlq_s32(vacc_hi, vright_shift);
/* Pack, saturate, and add output zero point */
#ifdef __aarch64__
const int16x8_t vacc = vqaddq_s16(
vqmovn_high_s32(vqmovn_s32(vacc_lo), vacc_hi), vy_zero_point);
#else
const int16x8_t vacc = vqaddq_s16(
vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi)),
vy_zero_point);
#endif
uint8x8_t vy = vqmovun_s16(vacc);
vy = vmax_u8(vy, vget_low_u8(vy_min));
vy = vmin_u8(vy, vget_low_u8(vy_max));
vst1_u8(y, vy);
y += 8;
}
if (n != 0) {
const size_t n_increment = n - 8;
const int64x1_t vld_shift = vmov_n_s64(8 * n_increment);
const uint8x8_t va = vreinterpret_u8_u64(
vshl_u64(vreinterpret_u64_u8(vld1_u8(a + n_increment)), vld_shift));
const uint8x8_t vb = vreinterpret_u8_u64(
vshl_u64(vreinterpret_u64_u8(vld1_u8(b + n_increment)), vld_shift));
/* Subtract zero point */
const int16x8_t vxa =
vreinterpretq_s16_u16(vsubl_u8(va, va_zero_point));
const int16x8_t vxb =
vreinterpretq_s16_u16(vsubl_u8(vb, vb_zero_point));
/* Multiply by factors and accumulate products */
int32x4_t vacc_lo =
vmulq_s32(vmovl_s16(vget_low_s16(vxa)), va_multiplier);
#ifdef __aarch64__
int32x4_t vacc_hi = vmulq_s32(vmovl_high_s16(vxa), va_multiplier);
#else
int32x4_t vacc_hi =
vmulq_s32(vmovl_s16(vget_high_s16(vxa)), va_multiplier);
#endif
vacc_lo =
vmlaq_s32(vacc_lo, vmovl_s16(vget_low_s16(vxb)), vb_multiplier);
#ifdef __aarch64__
vacc_hi = vmlaq_s32(vacc_hi, vmovl_high_s16(vxb), vb_multiplier);
#else
vacc_hi =
vmlaq_s32(vacc_hi, vmovl_s16(vget_high_s16(vxb)), vb_multiplier);
#endif
/* Shift right and round */
vacc_lo =
vsraq_n_s32(vacc_lo, vbicq_s32(vacc_lo, vzero_shift_mask), 31);
vacc_hi =
vsraq_n_s32(vacc_hi, vbicq_s32(vacc_hi, vzero_shift_mask), 31);
vacc_lo = vrshlq_s32(vacc_lo, vright_shift);
vacc_hi = vrshlq_s32(vacc_hi, vright_shift);
/* Pack, saturate, and add output zero point */
#ifdef __aarch64__
const int16x8_t vacc = vqaddq_s16(
vqmovn_high_s32(vqmovn_s32(vacc_lo), vacc_hi), vy_zero_point);
#else
const int16x8_t vacc = vqaddq_s16(
vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi)),
vy_zero_point);
#endif
uint8x8_t vy = vqmovun_s16(vacc);
vy = vmax_u8(vy, vget_low_u8(vy_min));
vy = vmin_u8(vy, vget_low_u8(vy_max));
if (n & 4) {
vst1_lane_u32(
__builtin_assume_aligned(y, 1), vreinterpret_u32_u8(vy), 0);
y += 4;
vy = vext_u8(vy, vy, 4);
}
if (n & 2) {
vst1_lane_u16(
__builtin_assume_aligned(y, 1), vreinterpret_u16_u8(vy), 0);
y += 2;
vy = vext_u8(vy, vy, 2);
}
if (n & 1) {
vst1_lane_u8(y, vy, 0);
}
}
}
else {
for (; n != 0; n--) {
const uint8x8_t va = vld1_dup_u8(a);
a += 1;
const uint8x8_t vb = vld1_dup_u8(b);
b += 1;
/* Subtract zero point */
const int16x4_t vxa =
vreinterpret_s16_u16(vget_low_u16(vsubl_u8(va, va_zero_point)));
const int16x4_t vxb =
vreinterpret_s16_u16(vget_low_u16(vsubl_u8(vb, vb_zero_point)));
/* Multiply by factors and accumulate products */
int32x2_t vacc =
vmul_s32(vget_low_s32(vmovl_s16(vxa)), vget_low_s32(va_multiplier));
vacc = vmla_s32(
vacc, vget_low_s32(vmovl_s16(vxb)), vget_low_s32(vb_multiplier));
/* Shift right and round */
vacc =
vsra_n_s32(vacc, vbic_s32(vacc, vget_low_s32(vzero_shift_mask)), 31);
vacc = vrshl_s32(vacc, vget_low_s32(vright_shift));
const int16x4_t vacc16 = vqadd_s16(
vqmovn_s32(vcombine_s32(vacc, vacc)), vget_low_s16(vy_zero_point));
/* Pack, saturate, and add output zero point */
uint8x8_t vy = vqmovun_s16(vcombine_s16(vacc16, vacc16));
vy = vmin_u8(vy, vget_low_u8(vy_max));
vy = vmax_u8(vy, vget_low_u8(vy_min));
vst1_lane_u8(y, vy, 0);
y += 1;
}
}
}
| 14,659
| 37.276762
| 80
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8vadd/sse2.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <immintrin.h>
#include <qnnpack/common.h>
#include <qnnpack/q8vadd.h>
#include <qnnpack/scalar-utils.h>
/*
 * Elementwise addition of two quantized uint8 vectors with requantization
 * (SSE2 variant):
 *
 *   y[i] = clamp(requantize(zero_point_product
 *                           + a[i] * a_multiplier
 *                           + b[i] * b_multiplier), y_min, y_max)
 *
 * where requantization is an arithmetic right shift by `shift` bits with
 * round-half-away-from-zero correction (implemented via the remainder
 * mask/threshold constants).
 *
 * n                   - number of elements to process.
 * a, b                - input vectors of n uint8 values.
 * y                   - output vector of n uint8 values.
 * quantization_params - precomputed SSE2 requantization constants.
 */
void pytorch_q8vadd_ukernel__sse2(
    size_t n,
    const uint8_t* a,
    const uint8_t* b,
    uint8_t* y,
    const union pytorch_qnnp_add_quantization_params
        quantization_params[RESTRICT_STATIC 1]) {
  if
    PYTORCH_QNNP_LIKELY(n >= 8) {
      /* Vector path: broadcast all requantization constants once. */
      const __m128i vzero_point_product = _mm_load_si128(
          (const __m128i*)&quantization_params->sse2.zero_point_product);
      const __m128i va_multiplier_lo = _mm_load_si128(
          (const __m128i*)&quantization_params->sse2.a_multiplier_lo);
      const __m128i va_multiplier_hi = _mm_load_si128(
          (const __m128i*)&quantization_params->sse2.a_multiplier_hi);
      const __m128i vb_multiplier_lo = _mm_load_si128(
          (const __m128i*)&quantization_params->sse2.b_multiplier_lo);
      const __m128i vb_multiplier_hi = _mm_load_si128(
          (const __m128i*)&quantization_params->sse2.b_multiplier_hi);
      const __m128i vremainder_mask = _mm_load_si128(
          (const __m128i*)quantization_params->sse2.remainder_mask);
      const __m128i vremainder_threshold = _mm_load_si128(
          (const __m128i*)quantization_params->sse2.remainder_threshold);
      const __m128i vshift =
          _mm_cvtsi32_si128((int)quantization_params->sse2.shift);
      const __m128i vzero = _mm_setzero_si128();
      /* Main loop: 8 elements per iteration. */
      do {
        const __m128i va = _mm_loadl_epi64((const __m128i*)a);
        a += 8;
        const __m128i vb = _mm_loadl_epi64((const __m128i*)b);
        b += 8;
        /* Zero-extend uint8 -> uint16 lanes. */
        const __m128i vxa = _mm_unpacklo_epi8(va, vzero);
        const __m128i vxb = _mm_unpacklo_epi8(vb, vzero);
        /* Multiply by factors */
        /* 16x16 -> 32-bit products assembled from 16-bit partial products:
           lo = low half of x * multiplier_lo,
           hi = high half of x * multiplier_lo plus x * multiplier_hi.
           The lo/hi halves are interleaved into 32-bit lanes below. */
        const __m128i va_product_lo = _mm_mullo_epi16(vxa, va_multiplier_lo);
        const __m128i va_product_hi = _mm_add_epi16(
            _mm_mulhi_epu16(vxa, va_multiplier_lo),
            _mm_mullo_epi16(vxa, va_multiplier_hi));
        const __m128i vb_product_lo = _mm_mullo_epi16(vxb, vb_multiplier_lo);
        const __m128i vb_product_hi = _mm_add_epi16(
            _mm_mulhi_epu16(vxb, vb_multiplier_lo),
            _mm_mullo_epi16(vxb, vb_multiplier_hi));
        /* Accumulate products */
        __m128i vacc_lo = _mm_add_epi32(
            vzero_point_product,
            _mm_unpacklo_epi16(va_product_lo, va_product_hi));
        __m128i vacc_hi = _mm_add_epi32(
            vzero_point_product,
            _mm_unpackhi_epi16(va_product_lo, va_product_hi));
        vacc_lo = _mm_add_epi32(
            vacc_lo, _mm_unpacklo_epi16(vb_product_lo, vb_product_hi));
        vacc_hi = _mm_add_epi32(
            vacc_hi, _mm_unpackhi_epi16(vb_product_lo, vb_product_hi));
        /* Shift right and round */
        /* Remainder correction makes the arithmetic shift round
           half-away-from-zero instead of toward negative infinity. */
        const __m128i vrem_lo = _mm_add_epi32(
            _mm_and_si128(vacc_lo, vremainder_mask),
            _mm_cmpgt_epi32(_mm_setzero_si128(), vacc_lo));
        const __m128i vrem_hi = _mm_add_epi32(
            _mm_and_si128(vacc_hi, vremainder_mask),
            _mm_cmpgt_epi32(_mm_setzero_si128(), vacc_hi));
        vacc_lo = _mm_sub_epi32(
            _mm_sra_epi32(vacc_lo, vshift),
            _mm_cmpgt_epi32(vrem_lo, vremainder_threshold));
        vacc_hi = _mm_sub_epi32(
            _mm_sra_epi32(vacc_hi, vshift),
            _mm_cmpgt_epi32(vrem_hi, vremainder_threshold));
        /* Pack, saturate, and add output zero point */
        const __m128i vy_zero_point = _mm_load_si128(
            (const __m128i*)quantization_params->sse2.y_zero_point);
        const __m128i vacc =
            _mm_adds_epi16(_mm_packs_epi32(vacc_lo, vacc_hi), vy_zero_point);
        __m128i vy = _mm_packus_epi16(vacc, vacc);
        vy = _mm_max_epu8(
            vy,
            _mm_load_si128((const __m128i*)quantization_params->sse2.y_min));
        vy = _mm_min_epu8(
            vy,
            _mm_load_si128((const __m128i*)quantization_params->sse2.y_max));
        _mm_storel_epi64((__m128i*)y, vy);
        y += 8;
        n -= 8;
      } while (n >= 8);
      if (n != 0) {
        /* Tail: 1-7 leftover elements. Re-load the last 8 input bytes
           (which overlap already-processed data) and shift the
           already-processed low bytes out of the register. */
        const size_t n_decrement = 8 - n;
        const __m128i vload_shift = _mm_cvtsi32_si128(8 * (int32_t)n_decrement);
        const __m128i va = _mm_srl_epi64(
            _mm_loadl_epi64((const __m128i*)(a - n_decrement)), vload_shift);
        const __m128i vb = _mm_srl_epi64(
            _mm_loadl_epi64((const __m128i*)(b - n_decrement)), vload_shift);
        const __m128i vxa = _mm_unpacklo_epi8(va, vzero);
        const __m128i vxb = _mm_unpacklo_epi8(vb, vzero);
        /* Multiply by factors */
        const __m128i va_product_lo = _mm_mullo_epi16(vxa, va_multiplier_lo);
        const __m128i va_product_hi = _mm_add_epi16(
            _mm_mulhi_epu16(vxa, va_multiplier_lo),
            _mm_mullo_epi16(vxa, va_multiplier_hi));
        const __m128i vb_product_lo = _mm_mullo_epi16(vxb, vb_multiplier_lo);
        const __m128i vb_product_hi = _mm_add_epi16(
            _mm_mulhi_epu16(vxb, vb_multiplier_lo),
            _mm_mullo_epi16(vxb, vb_multiplier_hi));
        /* Accumulate products */
        __m128i vacc_lo = _mm_add_epi32(
            vzero_point_product,
            _mm_unpacklo_epi16(va_product_lo, va_product_hi));
        __m128i vacc_hi = _mm_add_epi32(
            vzero_point_product,
            _mm_unpackhi_epi16(va_product_lo, va_product_hi));
        vacc_lo = _mm_add_epi32(
            vacc_lo, _mm_unpacklo_epi16(vb_product_lo, vb_product_hi));
        vacc_hi = _mm_add_epi32(
            vacc_hi, _mm_unpackhi_epi16(vb_product_lo, vb_product_hi));
        /* Shift right and round */
        const __m128i vrem_lo = _mm_add_epi32(
            _mm_and_si128(vacc_lo, vremainder_mask),
            _mm_cmpgt_epi32(_mm_setzero_si128(), vacc_lo));
        const __m128i vrem_hi = _mm_add_epi32(
            _mm_and_si128(vacc_hi, vremainder_mask),
            _mm_cmpgt_epi32(_mm_setzero_si128(), vacc_hi));
        vacc_lo = _mm_sub_epi32(
            _mm_sra_epi32(vacc_lo, vshift),
            _mm_cmpgt_epi32(vrem_lo, vremainder_threshold));
        vacc_hi = _mm_sub_epi32(
            _mm_sra_epi32(vacc_hi, vshift),
            _mm_cmpgt_epi32(vrem_hi, vremainder_threshold));
        /* Pack, saturate, and add output zero point */
        const __m128i vy_zero_point = _mm_load_si128(
            (const __m128i*)quantization_params->sse2.y_zero_point);
        const __m128i vacc =
            _mm_adds_epi16(_mm_packs_epi32(vacc_lo, vacc_hi), vy_zero_point);
        __m128i vy = _mm_packus_epi16(vacc, vacc);
        vy = _mm_max_epu8(
            vy,
            _mm_load_si128((const __m128i*)quantization_params->sse2.y_min));
        vy = _mm_min_epu8(
            vy,
            _mm_load_si128((const __m128i*)quantization_params->sse2.y_max));
        /* Store 4, 2, then 1 bytes according to the low bits of n. */
        if (n & 4) {
          *((uint32_t*)y) = (uint32_t)_mm_cvtsi128_si32(vy);
          vy = _mm_shuffle_epi32(vy, _MM_SHUFFLE(3, 2, 1, 1));
          y += 4;
        }
        if (n & 2) {
          *((uint16_t*)y) = (uint16_t)_mm_extract_epi16(vy, 0);
          vy = _mm_srli_epi32(vy, 16);
          y += 2;
        }
        if (n & 1) {
          *((uint8_t*)y) = (uint8_t)_mm_cvtsi128_si32(vy);
        }
      }
    }
  else {
    /* Scalar path for n < 8: same fixed-point arithmetic, one element
       at a time, using the scalar copies of the constants. */
    const int32_t vzero_point_product =
        quantization_params->sse2.zero_point_product[0];
    const uint32_t va_multiplier = quantization_params->sse2.a_multiplier;
    const uint32_t vb_multiplier = quantization_params->sse2.b_multiplier;
    const int32_t vremainder_mask = quantization_params->sse2.remainder_mask[0];
    const int32_t vremainder_threshold =
        quantization_params->sse2.remainder_threshold[0];
    const uint32_t vshift = quantization_params->sse2.shift;
    const int32_t vy_zero_point =
        (int32_t)quantization_params->sse2.y_zero_point[0];
    const int32_t vy_max =
        (int32_t)(uint32_t)quantization_params->sse2.y_max[0];
    const int32_t vy_min =
        (int32_t)(uint32_t)quantization_params->sse2.y_min[0];
    while (n-- != 0) {
      const uint32_t vxa = (uint32_t)*a++;
      const uint32_t vxb = (uint32_t)*b++;
      /* Multiply by factors and accumulate products */
      int32_t vacc = vzero_point_product + (int32_t)(vxa * va_multiplier) +
          (int32_t)(vxb * vb_multiplier);
      /* Shift right and round */
      const int32_t vrem = (vacc & vremainder_mask) - (int32_t)(vacc < 0);
      vacc = asr_s32(vacc, vshift) + (int32_t)(vrem > vremainder_threshold);
      /* Clamp and add output zero point */
      int32_t vy = vacc + vy_zero_point;
      vy = vy >= vy_min ? vy : vy_min;
      vy = vy <= vy_max ? vy : vy_max;
      *y++ = (uint8_t)vy;
    }
  }
}
| 8,872
| 38.435556
| 80
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/qnnpack/AlignedAllocator.h
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <cstddef>
#include <limits>
#include <stdlib.h>
/* Forward declaration of the aligned allocator primary template. */
template <typename T, size_t Alignment>
class AlignedAllocator;

/*
 * Specialization for void: exposes only the pointer/value typedefs and the
 * rebind mechanism, since void cannot have references or be allocated.
 */
template <size_t Alignment>
class AlignedAllocator<void, Alignment> {
 public:
  using pointer = void*;
  using const_pointer = const void*;
  using value_type = void;

  template <class U>
  struct rebind {
    using other = AlignedAllocator<U, Alignment>;
  };
};
/*
 * STL-compatible allocator returning memory aligned to `Alignment` bytes.
 * Uses memalign() on Android and posix_memalign() elsewhere; memory is
 * released with free(). Satisfies the pre-C++17 Allocator requirements
 * (explicit address/construct/destroy members).
 *
 * NOTE(review): relies on std::addressof/std::forward/std::bad_alloc being
 * visible through transitive includes — confirm <memory>, <utility> and
 * <new> reach this header on all toolchains.
 */
template <typename T, size_t Alignment>
class AlignedAllocator {
 public:
  typedef T value_type;
  typedef T* pointer;
  typedef const T* const_pointer;
  typedef T& reference;
  typedef const T& const_reference;
  typedef size_t size_type;
  typedef ptrdiff_t difference_type;
#if __cplusplus >= 201402L
  typedef std::true_type propagate_on_container_move_assignment;
#endif
  /* Rebinds this allocator to another value type, keeping the alignment. */
  template <class U>
  struct rebind {
    typedef AlignedAllocator<U, Alignment> other;
  };
 public:
  inline AlignedAllocator() noexcept = default;
  /* Converting copy constructor: allocators differing only in value type
     are interchangeable (stateless). */
  template <class U>
  inline AlignedAllocator(
      const AlignedAllocator<U, Alignment>& other) noexcept {}
  /* Largest n that can be requested without overflowing n * sizeof(T)
     plus alignment slack. */
  inline size_type max_size() const noexcept {
    return (std::numeric_limits<size_type>::max() - size_type(Alignment)) /
        sizeof(T);
  }
  inline pointer address(reference x) const noexcept {
    return std::addressof(x);
  }
  inline const_pointer address(const_reference x) const noexcept {
    return std::addressof(x);
  }
  /* Allocates storage for n objects aligned to Alignment bytes. The hint
     parameter is accepted for interface compatibility but ignored. Throws
     std::bad_alloc on failure when exceptions are enabled; otherwise a
     failed allocation falls through and returns the null/garbage pointer
     from the underlying allocator call. */
  inline pointer allocate(
      size_type n,
      typename AlignedAllocator<void, Alignment>::const_pointer hint = 0) {
#if defined(__ANDROID__)
    void* memory = memalign(Alignment, n * sizeof(T));
    if (memory == 0) {
#if !defined(__GNUC__) || defined(__EXCEPTIONS)
      throw std::bad_alloc();
#endif
    }
#else
    void* memory = nullptr;
    if (posix_memalign(&memory, Alignment, n * sizeof(T)) != 0) {
#if !defined(__GNUC__) || defined(__EXCEPTIONS)
      throw std::bad_alloc();
#endif
    }
#endif
    return static_cast<pointer>(memory);
  }
  /* Releases storage obtained from allocate(); n is unused because
     free()/memalign do not need the element count. */
  inline void deallocate(pointer p, size_type n) noexcept {
    free(static_cast<void*>(p));
  }
  /* Placement-constructs a U at p, forwarding the arguments. */
  template <class U, class... Args>
  inline void construct(U* p, Args&&... args) {
    ::new (static_cast<void*>(p)) U(std::forward<Args>(args)...);
  }
  template <class U>
  inline void destroy(U* p) {
    p->~U();
  }
};
| 2,492
| 22.299065
| 75
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/qnnpack/common.h
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
/* Compiler/platform abstraction macros shared across QNNPACK sources. */

/* PYTORCH_QNNP_UNREACHABLE: marks a code path that can never execute.
   Lowers to __builtin_unreachable() on clang / GCC >= 4.5,
   __builtin_trap() on older GCC, __assume(0) on MSVC, no-op elsewhere. */
#if defined(__GNUC__)
#if defined(__clang__) || (__GNUC__ > 4 || __GNUC__ == 4 && __GNUC_MINOR__ >= 5)
#define PYTORCH_QNNP_UNREACHABLE \
  do {                           \
    __builtin_unreachable();     \
  } while (0)
#else
#define PYTORCH_QNNP_UNREACHABLE \
  do {                           \
    __builtin_trap();            \
  } while (0)
#endif
#elif defined(_MSC_VER)
#define PYTORCH_QNNP_UNREACHABLE __assume(0)
#else
#define PYTORCH_QNNP_UNREACHABLE \
  do {                           \
  } while (0)
#endif

/* Aligns a variable or struct field to `alignment` bytes. */
#if defined(_MSC_VER)
#define PYTORCH_QNNP_ALIGN(alignment) __declspec(align(alignment))
#else
#define PYTORCH_QNNP_ALIGN(alignment) __attribute__((__aligned__(alignment)))
#endif

/* Number of elements in a statically-sized array. The `0 [array]` form
   fails to compile if `array` is actually a pointer. */
#define PYTORCH_QNNP_COUNT_OF(array) (sizeof(array) / sizeof(0 [array]))

/* Branch-prediction hints; plain boolean conversion where unsupported. */
#if defined(__GNUC__)
#define PYTORCH_QNNP_LIKELY(condition) (__builtin_expect(!!(condition), 1))
#define PYTORCH_QNNP_UNLIKELY(condition) (__builtin_expect(!!(condition), 0))
#else
#define PYTORCH_QNNP_LIKELY(condition) (!!(condition))
#define PYTORCH_QNNP_UNLIKELY(condition) (!!(condition))
#endif

/* Forces inlining on GCC-compatible compilers. */
#if defined(__GNUC__)
#define PYTORCH_QNNP_INLINE inline __attribute__((__always_inline__))
#else
#define PYTORCH_QNNP_INLINE inline
#endif

/* Symbol visibility: internal (not exported from the shared object). */
#ifndef PYTORCH_QNNP_INTERNAL
#if defined(__ELF__)
#define PYTORCH_QNNP_INTERNAL __attribute__((__visibility__("internal")))
#elif defined(__MACH__)
#define PYTORCH_QNNP_INTERNAL __attribute__((__visibility__("hidden")))
#else
#define PYTORCH_QNNP_INTERNAL
#endif
#endif

/* Symbol visibility: hidden. */
#ifndef PYTORCH_QNNP_PRIVATE
#if defined(__ELF__)
#define PYTORCH_QNNP_PRIVATE __attribute__((__visibility__("hidden")))
#elif defined(__MACH__)
#define PYTORCH_QNNP_PRIVATE __attribute__((__visibility__("hidden")))
#else
#define PYTORCH_QNNP_PRIVATE
#endif
#endif

/* MSVC has no C99 `restrict`; neutralize it and the array-bound idiom. */
#if defined(_MSC_VER)
#define RESTRICT_STATIC
#define restrict
#else
#define RESTRICT_STATIC restrict static
#endif

/* MSVC has no __builtin_prefetch; define it away. */
#if defined(_MSC_VER)
#define __builtin_prefetch
#endif

/* Qualifier for pointers that may be unaligned. */
#if defined(__GNUC__)
#define PYTORCH_QNNP_UNALIGNED __attribute__((__aligned__(1)))
#elif defined(_MSC_VER)
#if defined(_M_IX86)
#define PYTORCH_QNNP_UNALIGNED
#else
#define PYTORCH_QNNP_UNALIGNED __unaligned
#endif
#else
#error "Platform-specific implementation of PYTORCH_QNNP_UNALIGNED required"
#endif
| 2,489
| 25.210526
| 80
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/qnnpack/hgemm.h
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <stddef.h>
#include <stdint.h>
#include <qnnpack/params.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Declares a half-precision GEMM micro-kernel with the shared signature.
   Presumably mr/nr are the output tile dimensions and k the reduction
   depth, matching the other QNNPACK GEMM kernels — confirm against the
   kernel implementations. */
#define DECLARE_PYTORCH_HGEMM_UKERNEL_FUNCTION(fn_name) \
  void fn_name(                                         \
      size_t mr,                                        \
      size_t nr,                                        \
      size_t k,                                         \
      const void* a,                                    \
      size_t a_stride,                                  \
      const void* w,                                    \
      void* c,                                          \
      size_t c_stride,                                  \
      const struct pytorch_qnnp_fp16_clamping_params* clamping_params);
/* NEON fp16-arithmetic implementations (generic and AArch32-specific). */
DECLARE_PYTORCH_HGEMM_UKERNEL_FUNCTION(pytorch_hgemm_ukernel_8x8__neonfp16arith)
DECLARE_PYTORCH_HGEMM_UKERNEL_FUNCTION(pytorch_hgemm_ukernel_8x8__aarch32_neonfp16arith)
#ifdef __cplusplus
} /* extern "C" */
#endif
| 1,140
| 29.026316
| 88
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/qnnpack/indirection.h
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <stddef.h>
#include <stdint.h>
#include<pytorch_qnnpack.h>
#include <qnnpack/common.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Helpers that populate an operator's indirection (input-pointer) buffer;
   implementations live elsewhere in the library (not visible here). */
PYTORCH_QNNP_INTERNAL void pytorch_qnnp_indirection_init_conv3d(
    pytorch_qnnp_operator_t op,
    size_t output_tile_size,
    size_t tiled_output_size);
PYTORCH_QNNP_INTERNAL void pytorch_qnnp_indirection_init_dwconv(
    pytorch_qnnp_operator_t op,
    size_t batch_start);
PYTORCH_QNNP_INTERNAL void pytorch_qnnp_indirection_init_deconv2d(
    pytorch_qnnp_operator_t op,
    size_t output_tile_size,
    size_t tiled_output_size);
PYTORCH_QNNP_INTERNAL void pytorch_qnnp_indirection_init_maxpool2d(
    pytorch_qnnp_operator_t op,
    size_t batch_start);
/* Computes the operator's step_depth/step_height/step_width fields —
   presumably the indirection-buffer strides; confirm in indirection.c. */
PYTORCH_QNNP_INTERNAL void pytorch_qnnp_indirection_set_step_dimensions(
    pytorch_qnnp_operator_t op);
#ifdef __cplusplus
} /* extern "C" */
#endif
| 1,090
| 23.244444
| 72
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/qnnpack/isa-checks.h
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <cpuinfo.h>
/* Test helpers: return early from the enclosing test body (skipping it)
   when the required CPU feature is absent or cpuinfo fails to start. */
#define TEST_REQUIRES_X86_SSE2                              \
  do {                                                      \
    if (!cpuinfo_initialize() || !cpuinfo_has_x86_sse2()) { \
      return;                                               \
    }                                                       \
  } while (0)
#define TEST_REQUIRES_ARM_NEON                              \
  do {                                                      \
    if (!cpuinfo_initialize() || !cpuinfo_has_arm_neon()) { \
      return;                                               \
    }                                                       \
  } while (0)
#define TEST_REQUIRES_ARM_NEON_FP16_ARITH                              \
  do {                                                                 \
    if (!cpuinfo_initialize() || !cpuinfo_has_arm_neon_fp16_arith()) { \
      return;                                                          \
    }                                                                  \
  } while (0)
| 1,284
| 37.939394
| 72
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/qnnpack/log.h
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <inttypes.h>
#include <clog.h>
/* Default log threshold; can be overridden at build time. */
#ifndef PYTORCH_QNNP_LOG_LEVEL
#define PYTORCH_QNNP_LOG_LEVEL CLOG_WARNING
#endif
/* Per-severity clog logger definitions under the "QNNPACK" tag; messages
   below PYTORCH_QNNP_LOG_LEVEL are compiled out by clog. */
CLOG_DEFINE_LOG_DEBUG(
    pytorch_qnnp_log_debug,
    "QNNPACK",
    PYTORCH_QNNP_LOG_LEVEL);
CLOG_DEFINE_LOG_INFO(pytorch_qnnp_log_info, "QNNPACK", PYTORCH_QNNP_LOG_LEVEL);
CLOG_DEFINE_LOG_WARNING(
    pytorch_qnnp_log_warning,
    "QNNPACK",
    PYTORCH_QNNP_LOG_LEVEL);
CLOG_DEFINE_LOG_ERROR(
    pytorch_qnnp_log_error,
    "QNNPACK",
    PYTORCH_QNNP_LOG_LEVEL);
CLOG_DEFINE_LOG_FATAL(
    pytorch_qnnp_log_fatal,
    "QNNPACK",
    PYTORCH_QNNP_LOG_LEVEL);
| 822
| 21.861111
| 79
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/qnnpack/operator.h
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <stddef.h>
#include <stdint.h>
#include <qnnpack/requantization.h>
/* Data format of an operator: each byte of the constant packs a per-role
   log2 element size, decoded by the accessor helpers defined after the
   operator struct in this header. */
enum pytorch_qnnp_format {
  pytorch_qnnp_format_quint8 = 0x02000000,
  pytorch_qnnp_format_float32 = 0x02020202,
  pytorch_qnnp_format_float16 = 0x01010101,
};
/* Which micro-kernel family an operator dispatches to at run time. */
enum pytorch_qnnp_ukernel_type {
  pytorch_qnnp_ukernel_type_none = 0,
  pytorch_qnnp_ukernel_type_add,
  pytorch_qnnp_ukernel_type_average_pooling,
  pytorch_qnnp_ukernel_type_channel_shuffle,
  pytorch_qnnp_ukernel_type_clamp,
  pytorch_qnnp_ukernel_type_conv,
  pytorch_qnnp_ukernel_type_dwconv,
  pytorch_qnnp_ukernel_type_gemm,
  pytorch_qnnp_ukernel_type_gemm_sparse_dq,
  pytorch_qnnp_ukernel_type_gemm_prepackA_sparse_dq,
  pytorch_qnnp_ukernel_type_global_average_pooling,
  pytorch_qnnp_ukernel_type_lut,
  pytorch_qnnp_ukernel_type_max_pooling,
  pytorch_qnnp_ukernel_type_softargmax,
  pytorch_qnnp_ukernel_type_xzp_gemm,
};
/* Block-sparse weight matrix. The active member of each anonymous union
   is selected by indices_dtype. */
typedef struct {
  union {
    const uint32_t* col_indices_w32;
    const uint16_t* col_indices_w16;
    const uint8_t* col_indices_w8;
  };
  union {
    const uint32_t* row_values_w32;
    const uint16_t* row_values_w16;
    const uint8_t* row_values_w8;
  };
  const uint8_t* values;
  uint32_t row_block_size;
  uint32_t col_block_size;
  enum pytorch_qnnp_sparse_matrix_indices_dtype indices_dtype;
} sparse_matrix_t;
/* State of a created QNNPACK operator. Fields are written at creation /
   setup time and read by the run-time dispatch; which subsets are used
   depends on ukernel_type. */
struct pytorch_qnnp_operator {
  size_t batch_size;
  /* Convolution/pooling geometry. */
  uint32_t input_padding_depth;
  uint32_t input_padding_height;
  uint32_t input_padding_width;
  uint32_t adjustment_height;
  uint32_t adjustment_width;
  uint32_t kernel_depth;
  uint32_t kernel_height;
  uint32_t kernel_width;
  uint32_t stride_depth;
  uint32_t stride_height;
  uint32_t stride_width;
  uint32_t dilation_depth;
  uint32_t dilation_height;
  uint32_t dilation_width;
  uint32_t groups;
  size_t group_stride;
  size_t group_channels;
  size_t group_input_channels;
  size_t group_output_channels;
  size_t channels;
  /* Input tensor layout and the indirection buffer derived from it. */
  size_t input_depth;
  size_t input_height;
  size_t input_width;
  size_t input_pixel_stride;
  const void* input;
  const void** indirection_buffer;
  void* a_sum;
  size_t step_depth;
  size_t step_height;
  size_t step_width;
  /* Second input (binary ops such as add). */
  size_t input2_pixel_stride;
  const void* input2;
  /* Output tensor layout. */
  size_t output_depth;
  size_t output_height;
  size_t output_width;
  size_t output_pixel_stride;
  void* output;
  void* packed_weights;
  /* Quantization scalars. */
  float input_scale;
  float output_scale;
  uint8_t input_zero_point;
  uint8_t kernel_zero_point;
  uint8_t output_zero_point;
  uint8_t output_min;
  uint8_t output_max;
  /* Cached setup state used to detect when re-initialization is needed. */
  size_t valid_batch_size;
  size_t last_input_height;
  size_t last_input_width;
  const void* last_input;
  void* zero_buffer;
  void* zero_pointer;
  void* lookup_table;
  /* Kernel-specific quantization parameters; the active member matches
     ukernel_type. */
  union {
    union pytorch_qnnp_q31_requantization_params requantization_params;
    union pytorch_qnnp_conv_quantization_params conv_quantization_params;
    union pytorch_qnnp_add_quantization_params add_quantization_params;
    union pytorch_qnnp_avgpool_quantization_params avgpool_quantization_params;
    union pytorch_qnnp_u8_clamping_params u8_clamping_params;
  };
  enum pytorch_qnnp_ukernel_type ukernel_type;
  enum pytorch_qnnp_format format;
  bool per_channel;
  bool transpose;
  // Sparsity support
  sparse_matrix_t sparse_matrix;
  const void* bias;
  struct pytorch_qnnp_conv_dynamic_quantization_params dynamic_conv_quantization_params;
  uint8_t* prepacked_a;
};
/*
 * Accessors that decode the packed `format` word of an operator: each byte
 * carries a log2 element size — byte 0 output, byte 1 input, byte 2 kernel,
 * byte 3 bias.
 */
static inline uint32_t pytorch_qnnp_operator_get_log2_output_element_size(
    const struct pytorch_qnnp_operator* convolution) {
  const uint32_t format_word = (uint32_t)convolution->format;
  return format_word & UINT32_C(0xFF);
}

static inline uint32_t pytorch_qnnp_operator_get_log2_input_element_size(
    const struct pytorch_qnnp_operator* convolution) {
  const uint32_t format_word = (uint32_t)convolution->format;
  return (format_word >> 8) & UINT32_C(0xFF);
}

static inline uint32_t pytorch_qnnp_operator_get_log2_kernel_element_size(
    const struct pytorch_qnnp_operator* convolution) {
  const uint32_t format_word = (uint32_t)convolution->format;
  return (format_word >> 16) & UINT32_C(0xFF);
}

static inline uint32_t pytorch_qnnp_operator_get_log2_bias_element_size(
    const struct pytorch_qnnp_operator* convolution) {
  const uint32_t format_word = (uint32_t)convolution->format;
  return (format_word >> 24) & UINT32_C(0xFF);
}
| 4,379
| 26.721519
| 88
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/qnnpack/params.h
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <qnnpack/common.h>
#include <cpuinfo.h>
struct pytorch_qnnp_fp16_clamping_params {
uint16_t scale;
uint16_t max;
uint16_t min;
};
struct pytorch_qnnp_fp32_clamping_params {
float max;
float min;
};
union pytorch_qnnp_fp32_requantization_params {
struct {
float* scales;
uint8_t output_zero_point;
uint8_t output_max;
uint8_t output_min;
float min_less_zero_point;
float max_less_zero_point;
float magic;
int32_t magic_less_zero_point;
} scalar;
struct {
float* scales;
float max;
float min;
float magic;
int32_t magic_less_zero_point;
} neon;
struct {
float* scales;
int16_t zero_point;
uint8_t max;
uint8_t min;
} neonv8;
struct {
PYTORCH_QNNP_ALIGN(16) float* scales;
PYTORCH_QNNP_ALIGN(16) int16_t zero_point[8];
PYTORCH_QNNP_ALIGN(16) uint8_t max[16];
PYTORCH_QNNP_ALIGN(16) uint8_t min[16];
} sse2;
struct {
PYTORCH_QNNP_ALIGN(16) float* scales;
PYTORCH_QNNP_ALIGN(16) float min_less_zero_point[4];
PYTORCH_QNNP_ALIGN(16) float max_less_zero_point[4];
PYTORCH_QNNP_ALIGN(16) float magic[4];
PYTORCH_QNNP_ALIGN(16) int32_t magic_less_zero_point[4];
} psimd;
};
union pytorch_qnnp_precise_requantization_params {
struct {
uint32_t multiplier;
uint32_t rounding_lo;
uint32_t rounding_hi;
uint32_t shift_less_32;
int32_t min_less_zero_point;
int32_t max_less_zero_point;
int32_t zero_point;
} scalar;
struct {
int32_t multiplier;
int32_t right_shift;
int16_t zero_point;
uint8_t max;
uint8_t min;
} neon;
struct {
PYTORCH_QNNP_ALIGN(16) uint32_t multiplier[4];
PYTORCH_QNNP_ALIGN(16) uint64_t rounding[2];
PYTORCH_QNNP_ALIGN(16) uint32_t shift[4];
PYTORCH_QNNP_ALIGN(16) int16_t zero_point[8];
PYTORCH_QNNP_ALIGN(16) uint8_t max[16];
PYTORCH_QNNP_ALIGN(16) uint8_t min[16];
} sse2;
};
union pytorch_qnnp_q31_requantization_params {
struct {
int32_t multiplier;
int32_t remainder_mask;
int32_t remainder_threshold;
uint32_t shift;
int32_t min_less_zero_point;
int32_t max_less_zero_point;
int32_t zero_point;
} scalar;
#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
struct {
int32_t multiplier;
int32_t right_shift;
int16_t zero_point;
uint8_t max;
uint8_t min;
} neon;
#endif /* CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 */
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
struct {
PYTORCH_QNNP_ALIGN(16) uint32_t multiplier[4];
PYTORCH_QNNP_ALIGN(16) uint64_t rounding[2];
PYTORCH_QNNP_ALIGN(16) int32_t remainder_mask[4];
PYTORCH_QNNP_ALIGN(16) int32_t remainder_threshold[4];
PYTORCH_QNNP_ALIGN(16) uint64_t shift[2];
PYTORCH_QNNP_ALIGN(16) int16_t zero_point[8];
PYTORCH_QNNP_ALIGN(16) uint8_t max[16];
PYTORCH_QNNP_ALIGN(16) uint8_t min[16];
} sse2;
#endif /* CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 */
};
union pytorch_qnnp_conv_quantization_params {
struct {
const uint8_t* kernel_zero_points;
int32_t input_zero_point;
const float* requantization_scales;
int32_t output_min_less_zero_point;
int32_t output_max_less_zero_point;
int32_t output_zero_point;
} scalar;
#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
struct {
const uint8_t* kernel_zero_points;
int16_t input_zero_point;
const float* requantization_scales;
int16_t output_zero_point;
uint8_t output_max;
uint8_t output_min;
// Following four are for nearest-ties-to-even
// rounding in aarch32. This saves some instructions
// needed otherwise.
float vfmax;
float vfmin;
float vfmagic;
int32_t vimagic;
} neon;
#endif /* CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 */
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
struct {
PYTORCH_QNNP_ALIGN(16) const uint8_t* kernel_zero_points;
PYTORCH_QNNP_ALIGN(16) int16_t input_zero_point[8];
const PYTORCH_QNNP_ALIGN(16) float* requantization_scales;
PYTORCH_QNNP_ALIGN(16) int16_t output_zero_point[8];
PYTORCH_QNNP_ALIGN(16) uint8_t output_max[16];
PYTORCH_QNNP_ALIGN(16) uint8_t output_min[16];
} sse2;
#endif /* CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 */
};
struct pytorch_qnnp_conv_dynamic_quantization_params {
int16_t input_zero_point;
const uint8_t* kernel_zero_points;
const float* multipliers;
};
union pytorch_qnnp_requantization_params {
union pytorch_qnnp_precise_requantization_params precise;
union pytorch_qnnp_fp32_requantization_params fp32;
union pytorch_qnnp_q31_requantization_params q31;
};
/*
 * Precomputed parameters for quantized element-wise addition (consumed by
 * pytorch_q8vadd_ukernel_function). Each architecture-specific member lays
 * out the same logical quantities in the form its kernels load directly.
 */
union pytorch_qnnp_add_quantization_params {
  struct {
    int32_t zero_point_product;
    uint32_t a_multiplier;
    uint32_t b_multiplier;
    uint32_t shift;
    int32_t remainder_mask;
    int32_t remainder_threshold;
    int32_t y_zero_point;
    int32_t y_max;
    int32_t y_min;
  } scalar;
#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
  struct {
    uint8_t a_zero_point;
    uint8_t b_zero_point;
    int16_t y_zero_point;
    int32_t a_multiplier;
    int32_t b_multiplier;
    int32_t right_shift;
    uint8_t y_max;
    uint8_t y_min;
  } neon;
#endif
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
  /* SSE2 variant: scalar quantities broadcast into 16-byte-aligned vectors
   * so kernels can use aligned loads. */
  struct {
    PYTORCH_QNNP_ALIGN(16) int32_t zero_point_product[4];
    PYTORCH_QNNP_ALIGN(16) uint16_t a_multiplier_lo[8];
    PYTORCH_QNNP_ALIGN(16) uint16_t a_multiplier_hi[8];
    PYTORCH_QNNP_ALIGN(16) uint16_t b_multiplier_lo[8];
    PYTORCH_QNNP_ALIGN(16) uint16_t b_multiplier_hi[8];
    PYTORCH_QNNP_ALIGN(16) int32_t remainder_mask[4];
    PYTORCH_QNNP_ALIGN(16) int32_t remainder_threshold[4];
    PYTORCH_QNNP_ALIGN(16) int16_t y_zero_point[8];
    PYTORCH_QNNP_ALIGN(16) uint8_t y_max[16];
    PYTORCH_QNNP_ALIGN(16) uint8_t y_min[16];
    uint32_t shift;
    uint32_t a_multiplier;
    uint32_t b_multiplier;
  } sse2;
#endif
};
/*
 * Precomputed parameters for quantized average pooling (consumed by the
 * q8avgpool / q8gavgpool ukernel families declared below).
 */
union pytorch_qnnp_avgpool_quantization_params {
  struct {
    int32_t bias;
    float scale;
    int32_t output_zero_point;
    uint8_t output_max;
    uint8_t output_min;
  } scalar;
#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
  struct {
    int32_t bias;
    float scale;
    int16_t output_zero_point;
    uint8_t output_max;
    uint8_t output_min;
    // Following four are for nearest-ties-to-even
    // rounding in aarch32. This saves some instructions
    // needed otherwise.
    float vfmax;
    float vfmin;
    float vfmagic;
    int32_t vimagic;
  } neon;
#endif /* CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 */
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
  /* SSE2 variant: quantities broadcast into 16-byte-aligned vectors. */
  struct {
    PYTORCH_QNNP_ALIGN(16) int32_t bias[4];
    PYTORCH_QNNP_ALIGN(16) float scale[4];
    PYTORCH_QNNP_ALIGN(16) int16_t output_zero_point[8];
    PYTORCH_QNNP_ALIGN(16) uint8_t output_max[16];
    PYTORCH_QNNP_ALIGN(16) uint8_t output_min[16];
  } sse2;
#endif
};
/*
 * Output clamping bounds for uint8 kernels (max pooling, u8clamp);
 * one member per architecture-specific layout.
 */
union pytorch_qnnp_u8_clamping_params {
  struct {
    int32_t output_max;
    int32_t output_min;
  } scalar;
#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
  struct {
    uint8_t output_max;
    uint8_t output_min;
  } neon;
#endif /* CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 */
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
  /* Bounds broadcast to full 16-byte vectors for SSE2. */
  struct {
    PYTORCH_QNNP_ALIGN(16) uint8_t output_max[16];
    PYTORCH_QNNP_ALIGN(16) uint8_t output_min[16];
  } sse2;
#endif /* CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 */
};
/* 8-bit GEMM microkernel: computes an mr-by-nr output tile over k reduction
 * elements, requantizing the int32 accumulators to uint8. */
typedef void (*pytorch_q8gemm_ukernel_function)(
    size_t mr,
    size_t nr,
    size_t k,
    const uint8_t* a,
    size_t a_stride,
    const void* w,
    uint8_t* c,
    size_t c_stride,
    size_t output_channel_index,
    const union pytorch_qnnp_conv_quantization_params* quantization_params);
/*
  Q8 GEMM kernel with support for dynamic quantization.

  The w parameter designates weights, and is to be passed on to this kernel
  exactly as returned by the pack function. The initial bias portion of
  this buffer will be ignored.

  The bias parameter, expects max(nr, 8) floating-point biases. Technically
  the kernels only need nr biases from the buffer pointed to by this parameter,
  but end up reading at most 8 to keep the logic simple and fast. Consequently,
  make sure this parameter has enough storage for 8 floating point numbers to
  avoid triggering out of bound errors. The remaining 8 - nr biases, if any,
  will be unused.

  quantization_params contains the quantization parameters, namely input, and
  kernel zero points, and the multiplier. The multiplier is expected to be
  equal to input_scale * kernel_scale.
*/
typedef void (*pytorch_q8gemm_dq_ukernel_function)(
    size_t mr,
    size_t nr,
    size_t k,
    const uint8_t* a,
    size_t a_stride,
    const void* w,
    const float* bias,
    float* c,
    size_t c_stride,
    size_t output_channel_index,
    const struct pytorch_qnnp_conv_dynamic_quantization_params* quantization_params);
/* Dynamic-quantization GEMM over sparse weights described by row pointers
 * (w_row_ptr) and block column ids (w_block_ids_ptr), uint32_t indices. */
typedef void (*pytorch_q8gemm_dq_sparse_ukernel_function)(
    size_t mr,
    size_t nr,
    const uint8_t* a,
    size_t a_stride,
    const uint8_t* packed_w,
    const uint32_t* w_row_ptr,
    const uint32_t* w_block_ids_ptr,
    const float* bias,
    float* c,
    size_t c_stride,
    size_t output_channel_index,
    const struct pytorch_qnnp_conv_dynamic_quantization_params* quantization_params);
/* The packedA variants below take a pre-packed activation matrix (a_packed);
 * the w32/w16/w8 suffix is the datatype of row values/col indices. */
typedef void (*pytorch_q8gemm_dq_sparse_packedA_w32_ukernel_function)(
    size_t mr,
    size_t nr,
    const uint8_t* a_packed,
    const uint8_t* packed_w,
    const uint32_t* w_row_ptr,
    const uint32_t* w_block_ids_ptr,
    const float* bias,
    float* c,
    size_t c_stride,
    size_t output_channel_index,
    const struct pytorch_qnnp_conv_dynamic_quantization_params* quantization_params);
typedef void (*pytorch_q8gemm_dq_sparse_packedA_w16_ukernel_function)(
    size_t mr,
    size_t nr,
    const uint8_t* a_packed,
    const uint8_t* packed_w,
    const uint16_t* w_row_ptr,
    const uint16_t* w_block_ids_ptr,
    const float* bias,
    float* c,
    size_t c_stride,
    size_t output_channel_index,
    const struct pytorch_qnnp_conv_dynamic_quantization_params* quantization_params);
typedef void (*pytorch_q8gemm_dq_sparse_packedA_w8_ukernel_function)(
    size_t mr,
    size_t nr,
    const uint8_t* a_packed,
    const uint8_t* packed_w,
    const uint8_t* w_row_ptr,
    const uint8_t* w_block_ids_ptr,
    const float* bias,
    float* c,
    size_t c_stride,
    size_t output_channel_index,
    const struct pytorch_qnnp_conv_dynamic_quantization_params* quantization_params);
/* Packs an mr-by-K activation tile from strided layout into a_packed. */
typedef void (*pytorch_q8gemm_sparse_packA_ukernel_function)(
    const size_t mr,
    const size_t K,
    const uint8_t* a,
    const size_t a_stride,
    uint8_t* a_packed);
/* 8-bit convolution microkernel: like q8gemm, but reads activations through
 * an indirection buffer (a) of ks pointer groups. */
typedef void (*pytorch_q8conv_ukernel_function)(
    size_t mr,
    size_t nr,
    size_t kc,
    size_t ks,
    const uint8_t** a,
    const void* w,
    uint8_t* c,
    size_t c_stride,
    size_t output_channel_index,
    const union pytorch_qnnp_conv_quantization_params* quantization_params);
/* GEMM variant using precomputed per-row activation sums (a_sum) with Q31
 * requantization (the "xzp" scheme). */
typedef void (*pytorch_q8gemm_xzp_ukernel_function)(
    size_t mr,
    size_t nr,
    size_t k,
    const uint8_t* a,
    size_t a_stride,
    const int32_t* a_sum,
    const void* w,
    uint8_t* c,
    size_t c_stride,
    const union pytorch_qnnp_q31_requantization_params* requantization_params);
/* Computes multiplier-scaled row sums of an m-by-k uint8 matrix into sums. */
typedef void (*pytorch_q8sum_rows_ukernel_function)(
    const uint8_t* a,
    size_t m,
    size_t k,
    size_t stride,
    int32_t multiplier,
    int32_t* sums);
/* Interleaves a constant number of channels (x2/x3/x4 variants) of n bytes. */
typedef void (*pytorch_xzipc_ukernel_function)(size_t n, const void* x, void* y);
/* Interleaves a variable number (m) of channels of n bytes. */
typedef void (
    *pytorch_xzipv_ukernel_function)(size_t n, size_t m, const void* x, void* y);
/* Maps n bytes of x through the 256-entry lookup table t into y. */
typedef void (*pytorch_x8lut_ukernel_function)(
    size_t n,
    const uint8_t* x,
    const uint8_t* t,
    uint8_t* y);
/* Single-precision GEMM microkernel with output clamping. */
typedef void (*pytorch_sgemm_ukernel_function)(
    size_t mr,
    size_t nr,
    size_t k,
    const float* a,
    size_t a_stride,
    const float* w,
    float* c,
    size_t c_stride,
    const struct pytorch_qnnp_fp32_clamping_params* clamping_params);
/* Single-precision convolution microkernel (indirection-buffer input). */
typedef void (*pytorch_sconv_ukernel_function)(
    size_t mr,
    size_t nr,
    size_t kc,
    size_t ks,
    const float** a,
    const float* w,
    float* c,
    size_t c_stride,
    const struct pytorch_qnnp_fp32_clamping_params* clamping_params);
/* Half-precision GEMM microkernel (fp16 data passed through void pointers). */
typedef void (*pytorch_hgemm_ukernel_function)(
    size_t mr,
    size_t nr,
    size_t k,
    const void* a,
    size_t a_stride,
    const void* w,
    void* c,
    size_t c_stride,
    const struct pytorch_qnnp_fp16_clamping_params* clamping_params);
/* Depthwise 2-D conv, unit-pass ("up") variant: all taps accumulated in one
 * pass, no intermediate buffer. */
typedef void (*pytorch_q8dwconv2d_up_ukernel_function)(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    size_t input_stride,
    size_t output_increment,
    const union pytorch_qnnp_conv_quantization_params* quantization_params);
/* Depthwise 2-D conv, multi-pass ("mp") variant: partial sums are staged in
 * the caller-provided int32 buffer. */
typedef void (*pytorch_q8dwconv2d_mp_ukernel_function)(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    int32_t* buffer,
    uint8_t* output,
    size_t input_stride,
    size_t output_increment,
    const union pytorch_qnnp_conv_quantization_params* quantization_params);
/* Depthwise 3-D conv, multi-pass variant (separate row/column strides). */
typedef void (*pytorch_q8dwconv3d_mp_ukernel_function)(
    size_t channels,
    size_t output_height,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    int32_t* buffer,
    uint8_t* output,
    size_t input_row_stride,
    size_t input_col_stride,
    size_t output_increment,
    const union pytorch_qnnp_conv_quantization_params* quantization_params);
/* Global average pooling, unit-pass variant. */
typedef void (*pytorch_q8gavgpool_up_ukernel_function)(
    size_t m,
    size_t n,
    const uint8_t* x,
    size_t x_stride,
    const uint8_t* zero,
    uint8_t* y,
    const union pytorch_qnnp_avgpool_quantization_params* quantization_params);
/* Global average pooling, multi-pass variant (int32 scratch buffer). */
typedef void (*pytorch_q8gavgpool_mp_ukernel_function)(
    size_t m,
    size_t n,
    const uint8_t* x,
    size_t x_stride,
    const uint8_t* zero,
    int32_t* buffer,
    uint8_t* y,
    const union pytorch_qnnp_avgpool_quantization_params* quantization_params);
/* Windowed average pooling, unit-pass variant (indirection-buffer input). */
typedef void (*pytorch_q8avgpool_up_ukernel_function)(
    size_t n,
    size_t ks,
    size_t kc,
    const uint8_t** x,
    const uint8_t* zero,
    uint8_t* y,
    size_t x_increment,
    size_t y_increment,
    const union pytorch_qnnp_avgpool_quantization_params* quantization_params);
/* Windowed average pooling, multi-pass variant. */
typedef void (*pytorch_q8avgpool_mp_ukernel_function)(
    size_t n,
    size_t ks,
    size_t kc,
    const uint8_t** x,
    const uint8_t* zero,
    int32_t* buffer,
    uint8_t* y,
    size_t x_increment,
    size_t y_increment,
    const union pytorch_qnnp_avgpool_quantization_params* quantization_params);
/* Max pooling over an indirection buffer with clamped uint8 output. */
typedef void (*pytorch_u8maxpool_ukernel_function)(
    size_t n,
    size_t ks,
    size_t kc,
    const uint8_t** x,
    uint8_t* y,
    size_t x_increment,
    size_t y_increment,
    const union pytorch_qnnp_u8_clamping_params* params);
/* Clamps n uint8 values of x into y using the given bounds. */
typedef void (*pytorch_u8clamp_ukernel_function)(
    size_t n,
    const uint8_t* x,
    uint8_t* y,
    const union pytorch_qnnp_u8_clamping_params* params);
/* Returns the maximum of n uint8 values. */
typedef uint8_t (*pytorch_u8rmax_ukernel_function)(size_t n, const uint8_t* x);
/* Lookup through a uint32 table followed by normalization to uint8. */
typedef void (*pytorch_u8lut32norm_ukernel_function)(
    size_t n,
    const uint8_t* x,
    const uint32_t* t,
    uint8_t* y);
/* Quantized element-wise addition y = requantize(a + b). */
typedef void (*pytorch_q8vadd_ukernel_function)(
    size_t n,
    const uint8_t* a,
    const uint8_t* b,
    uint8_t* y,
    const union pytorch_qnnp_add_quantization_params* quantization_params);
/* Selected q8 GEMM/conv microkernels and their tile sizes (mr/nr/kr). */
struct pytorch_q8conv_parameters {
  pytorch_q8gemm_ukernel_function gemm;
  pytorch_q8conv_ukernel_function conv;
  pytorch_q8gemm_dq_ukernel_function gemm_dq;
  uint8_t mr;
  uint8_t nr;
  uint8_t kr;
};
/* Selected sparse-GEMM microkernels plus the sparsity block geometry. */
struct pytorch_q8gemm_sparse_parameters {
  pytorch_q8gemm_dq_sparse_ukernel_function gemm_dq;
  // w32, w16, and w8 refer to variants of the kernel which use uint32_t,
  // uint16_t, and uint8_t datatype for row values/col indices respectively
  pytorch_q8gemm_dq_sparse_packedA_w32_ukernel_function packedA_w32_gemm_dq;
  pytorch_q8gemm_dq_sparse_packedA_w16_ukernel_function packedA_w16_gemm_dq;
  pytorch_q8gemm_dq_sparse_packedA_w8_ukernel_function packedA_w8_gemm_dq;
  pytorch_q8gemm_sparse_packA_ukernel_function packA;
  uint8_t mr;
  uint8_t nr;
  uint8_t kr;
  uint8_t log2_mr;
  uint8_t log2_row_block_size;
  uint32_t row_block_size;
  uint32_t col_block_size;
};
/* Selected xzp-GEMM microkernel; kthreshold gates when this path is used
 * (presumably by reduction size — confirm against the operator code). */
struct pytorch_q8conv_xzp_parameters {
  pytorch_q8gemm_xzp_ukernel_function gemm;
  /* no conv ukernel */
  uint8_t mr;
  uint8_t nr;
  uint8_t kr;
  uint8_t kc;
  size_t kthreshold;
};
/* Unit-pass depthwise 2-D conv kernels (per-tensor and per-channel). */
struct pytorch_q8dwconv2d_up_parameters {
  pytorch_q8dwconv2d_up_ukernel_function updw;
  pytorch_q8dwconv2d_up_ukernel_function updw_per_channel;
  uint8_t cr;
};
/* Multi-pass depthwise 2-D conv kernels (per-tensor and per-channel). */
struct pytorch_q8dwconv2d_mp_parameters {
  pytorch_q8dwconv2d_mp_ukernel_function mpdw;
  pytorch_q8dwconv2d_mp_ukernel_function mpdw_per_channel;
  uint8_t cr;
};
/* Multi-pass depthwise 3-D conv kernel. */
struct pytorch_q8dwconv3d_mp_parameters {
  pytorch_q8dwconv3d_mp_ukernel_function mpdw;
  uint8_t cr;
};
struct pytorch_q8sum_rows_parameters {
  pytorch_q8sum_rows_ukernel_function sum_rows;
  uint32_t m;
};
/* Global average pooling kernels, keyed by problem size relative to mr/nr
 * (ltnr: n < nr; genr_lemr / genr_gtmr: n >= nr, m <= / > mr). */
struct pytorch_q8gavgpool_parameters {
  pytorch_q8gavgpool_up_ukernel_function ltnr;
  pytorch_q8gavgpool_up_ukernel_function genr_lemr;
  pytorch_q8gavgpool_mp_ukernel_function genr_gtmr;
  uint8_t mr;
  uint8_t nr;
};
/* Windowed average pooling kernels, keyed by kernel size relative to kr/mr. */
struct pytorch_q8avgpool_parameters {
  pytorch_q8avgpool_up_ukernel_function ltkr;
  pytorch_q8avgpool_up_ukernel_function gekr_lemr;
  pytorch_q8avgpool_mp_ukernel_function gekr_gtmr;
  uint8_t mr;
  uint8_t qr;
  uint8_t kr;
};
/* Max pooling kernels for kernel sizes below / at-or-above kr. */
struct pytorch_u8maxpool_parameters {
  pytorch_u8maxpool_ukernel_function ltkr;
  pytorch_u8maxpool_ukernel_function gekr;
  uint8_t mr;
  uint8_t qr;
  uint8_t kr;
};
/* Channel-interleave kernels for 2, 3, 4, and m channels. */
struct pytorch_x8zip_parameters {
  pytorch_xzipc_ukernel_function x2;
  pytorch_xzipc_ukernel_function x3;
  pytorch_xzipc_ukernel_function x4;
  pytorch_xzipv_ukernel_function xm;
};
/* Top-level dispatch table: every microkernel family selected for the
 * current build/CPU, plus an initialization flag. */
struct pytorch_qnnp_parameters {
  struct pytorch_q8conv_parameters q8conv;
  struct pytorch_q8gemm_sparse_parameters q8gemm_sparse_c1x4;
  struct pytorch_q8gemm_sparse_parameters q8gemm_sparse_c8x1;
  struct pytorch_q8conv_xzp_parameters q8conv_xzp;
  struct pytorch_q8dwconv2d_up_parameters q8dw9;
  struct pytorch_q8dwconv2d_mp_parameters q8dw25;
  struct pytorch_q8dwconv3d_mp_parameters q8dw27;
  struct pytorch_q8sum_rows_parameters q8sum_rows;
  pytorch_q8vadd_ukernel_function q8vadd;
  struct pytorch_q8gavgpool_parameters q8gavgpool;
  struct pytorch_q8avgpool_parameters q8avgpool;
  struct pytorch_u8maxpool_parameters u8maxpool;
  pytorch_u8lut32norm_ukernel_function u8lut32norm;
  pytorch_u8clamp_ukernel_function u8clamp;
  pytorch_u8rmax_ukernel_function u8rmax;
  struct pytorch_x8zip_parameters x8zip;
  pytorch_x8lut_ukernel_function x8lut;
  bool initialized;
};
#ifdef __cplusplus
extern "C" {
#endif
/* Process-wide microkernel dispatch table; `initialized` signals whether it
 * has been populated (presumably by library initialization — confirm). */
extern struct pytorch_qnnp_parameters pytorch_qnnp_params;
#ifdef __cplusplus
}
#endif
| 19,075
| 26.929722
| 85
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/qnnpack/q8avgpool.h
|
/*
 * Copyright (c) Facebook, Inc. and its affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */
#pragma once
#include <stddef.h>
#include <stdint.h>
#include <qnnpack/common.h>
#include <qnnpack/params.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Declares a multi-pass ("mp") quantized average-pooling microkernel; the
 * int32 buffer holds partial sums between passes. */
#define DECLARE_PYTORCH_Q8MPAVGPOOL_UKERNEL_FUNCTION(fn_name) \
  PYTORCH_QNNP_INTERNAL void fn_name(                         \
      size_t n,                                               \
      size_t ks,                                              \
      size_t kc,                                              \
      const uint8_t** x,                                      \
      const uint8_t* zero,                                    \
      int32_t* buffer,                                        \
      uint8_t* y,                                             \
      size_t x_increment,                                     \
      size_t y_increment,                                     \
      const union pytorch_qnnp_avgpool_quantization_params*   \
          quantization_params);
DECLARE_PYTORCH_Q8MPAVGPOOL_UKERNEL_FUNCTION(pytorch_q8avgpool_ukernel_mp8x9p8q__neon)
DECLARE_PYTORCH_Q8MPAVGPOOL_UKERNEL_FUNCTION(pytorch_q8avgpool_ukernel_mp8x9p8q__sse2)
/* Declares a unit-pass ("up") quantized average-pooling microkernel; all
 * taps are accumulated in one pass, no scratch buffer needed. */
#define DECLARE_PYTORCH_Q8UPAVGPOOL_UKERNEL_FUNCTION(fn_name) \
  PYTORCH_QNNP_INTERNAL void fn_name(                         \
      size_t n,                                               \
      size_t ks,                                              \
      size_t kc,                                              \
      const uint8_t** x,                                      \
      const uint8_t* zero,                                    \
      uint8_t* y,                                             \
      size_t x_increment,                                     \
      size_t y_increment,                                     \
      const union pytorch_qnnp_avgpool_quantization_params*   \
          quantization_params);
DECLARE_PYTORCH_Q8UPAVGPOOL_UKERNEL_FUNCTION(pytorch_q8avgpool_ukernel_up8x9__neon)
DECLARE_PYTORCH_Q8UPAVGPOOL_UKERNEL_FUNCTION(pytorch_q8avgpool_ukernel_up8xm__neon)
DECLARE_PYTORCH_Q8UPAVGPOOL_UKERNEL_FUNCTION(pytorch_q8avgpool_ukernel_up8x9__sse2)
DECLARE_PYTORCH_Q8UPAVGPOOL_UKERNEL_FUNCTION(pytorch_q8avgpool_ukernel_up8xm__sse2)
#ifdef __cplusplus
} /* extern "C" */
#endif
| 2,436
| 40.305085
| 86
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/qnnpack/q8conv.h
|
/*
 * Copyright (c) Facebook, Inc. and its affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */
#pragma once
#include <stddef.h>
#include <stdint.h>
#include <qnnpack/common.h>
#include <qnnpack/params.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Declares an 8-bit convolution microkernel (indirection-buffer input);
 * signature matches pytorch_q8conv_ukernel_function in qnnpack/params.h. */
#define DECLARE_PYTORCH_Q8CONV_UKERNEL_FUNCTION(fn_name) \
  PYTORCH_QNNP_INTERNAL void fn_name(                    \
      size_t mr,                                         \
      size_t nr,                                         \
      size_t kc,                                         \
      size_t ks,                                         \
      const uint8_t** a,                                 \
      const void* w,                                     \
      uint8_t* c,                                        \
      size_t c_stride,                                   \
      size_t output_channel_index,                       \
      const union pytorch_qnnp_conv_quantization_params* quantization_params);
DECLARE_PYTORCH_Q8CONV_UKERNEL_FUNCTION(pytorch_q8conv_ukernel_4x8__neon)
DECLARE_PYTORCH_Q8CONV_UKERNEL_FUNCTION(pytorch_q8conv_ukernel_4x8__aarch32_neon)
DECLARE_PYTORCH_Q8CONV_UKERNEL_FUNCTION(pytorch_q8conv_ukernel_8x8__aarch64_neon)
DECLARE_PYTORCH_Q8CONV_UKERNEL_FUNCTION(pytorch_q8conv_ukernel_8x8__neon)
DECLARE_PYTORCH_Q8CONV_UKERNEL_FUNCTION(pytorch_q8conv_ukernel_4x4c2__sse2)
#ifdef __cplusplus
} /* extern "C" */
#endif
| 1,454
| 32.837209
| 81
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/qnnpack/q8dwconv.h
|
/*
 * Copyright (c) Facebook, Inc. and its affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */
#pragma once
#include <stddef.h>
#include <stdint.h>
#include <qnnpack/common.h>
#include <qnnpack/params.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Declares a unit-pass ("up") quantized depthwise 2-D conv microkernel. */
#define DECLARE_PYTORCH_Q8UPDWCONV_UKERNEL_FUNCTION(fn_name) \
  PYTORCH_QNNP_INTERNAL void fn_name(                        \
      size_t channels,                                       \
      size_t output_width,                                   \
      const uint8_t** input,                                 \
      const void* weights,                                   \
      uint8_t* output,                                       \
      size_t input_stride,                                   \
      size_t output_increment,                               \
      const union pytorch_qnnp_conv_quantization_params* quantization_params);
DECLARE_PYTORCH_Q8UPDWCONV_UKERNEL_FUNCTION(pytorch_q8dwconv_ukernel_up8x9__neon)
DECLARE_PYTORCH_Q8UPDWCONV_UKERNEL_FUNCTION(
    pytorch_q8dwconv_ukernel_up8x9_per_channel__neon)
DECLARE_PYTORCH_Q8UPDWCONV_UKERNEL_FUNCTION(pytorch_q8dwconv_ukernel_up8x9__aarch32_neon)
DECLARE_PYTORCH_Q8UPDWCONV_UKERNEL_FUNCTION(
    pytorch_q8dwconv_ukernel_up8x9_per_channel__aarch32_neon)
DECLARE_PYTORCH_Q8UPDWCONV_UKERNEL_FUNCTION(pytorch_q8dwconv_ukernel_up8x9__sse2)
DECLARE_PYTORCH_Q8UPDWCONV_UKERNEL_FUNCTION(
    pytorch_q8dwconv_ukernel_up8x9_per_channel__sse2)
/* Declares a multi-pass ("mp") quantized depthwise 2-D conv microkernel;
 * partial sums are staged in the int32 buffer. */
#define DECLARE_PYTORCH_Q8MPDWCONV_UKERNEL_FUNCTION(fn_name) \
  PYTORCH_QNNP_INTERNAL void fn_name(                        \
      size_t channels,                                       \
      size_t output_width,                                   \
      const uint8_t** input,                                 \
      const void* weights,                                   \
      int32_t* buffer,                                       \
      uint8_t* output,                                       \
      size_t input_stride,                                   \
      size_t output_increment,                               \
      const union pytorch_qnnp_conv_quantization_params* quantization_params);
DECLARE_PYTORCH_Q8MPDWCONV_UKERNEL_FUNCTION(pytorch_q8dwconv_ukernel_mp8x25__neon)
DECLARE_PYTORCH_Q8MPDWCONV_UKERNEL_FUNCTION(
    pytorch_q8dwconv_ukernel_mp8x25_per_channel__neon)
DECLARE_PYTORCH_Q8MPDWCONV_UKERNEL_FUNCTION(pytorch_q8dwconv_ukernel_mp8x25__sse2)
DECLARE_PYTORCH_Q8MPDWCONV_UKERNEL_FUNCTION(
    pytorch_q8dwconv_ukernel_mp8x25_per_channel__sse2)
/* Declares a multi-pass quantized depthwise 3-D conv microkernel. */
#define DECLARE_PYTORCH_Q8MPDWCONV_3D_UKERNEL_FUNCTION(fn_name) \
  PYTORCH_QNNP_INTERNAL void fn_name(                           \
      size_t channels,                                          \
      size_t output_height,                                     \
      size_t output_width,                                      \
      const uint8_t** input,                                    \
      const void* weights,                                      \
      int32_t* buffer,                                          \
      uint8_t* output,                                          \
      size_t input_row_stride,                                  \
      size_t input_col_stride,                                  \
      size_t output_increment,                                  \
      const union pytorch_qnnp_conv_quantization_params* quantization_params);
DECLARE_PYTORCH_Q8MPDWCONV_3D_UKERNEL_FUNCTION(
    pytorch_q8dwconv_ukernel_mp8x27__neon)
DECLARE_PYTORCH_Q8MPDWCONV_3D_UKERNEL_FUNCTION(
    pytorch_q8dwconv_ukernel_mp8x27__sse2)
#ifdef __cplusplus
} /* extern "C" */
#endif
| 3,619
| 42.614458
| 89
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/qnnpack/q8gavgpool.h
|
/*
 * Copyright (c) Facebook, Inc. and its affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */
#pragma once
#include <stddef.h>
#include <stdint.h>
#include <qnnpack/common.h>
#include <qnnpack/params.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Declares a multi-pass ("mp") quantized global average-pooling microkernel;
 * the int32 buffer holds partial sums between passes. */
#define DECLARE_PYTORCH_Q8MPGAVGPOOL_UKERNEL_FUNCTION(fn_name) \
  PYTORCH_QNNP_INTERNAL void fn_name(                          \
      size_t m,                                                \
      size_t n,                                                \
      const uint8_t* x,                                        \
      size_t x_stride,                                         \
      const uint8_t* zero,                                     \
      int32_t* buffer,                                         \
      uint8_t* y,                                              \
      const union pytorch_qnnp_avgpool_quantization_params*    \
          quantization_params);
DECLARE_PYTORCH_Q8MPGAVGPOOL_UKERNEL_FUNCTION(pytorch_q8gavgpool_ukernel_mp8x7p7q__neon)
DECLARE_PYTORCH_Q8MPGAVGPOOL_UKERNEL_FUNCTION(pytorch_q8gavgpool_ukernel_mp8x7p7q__sse2)
/* Declares a unit-pass ("up") quantized global average-pooling microkernel. */
#define DECLARE_PYTORCH_Q8UPGAVGPOOL_UKERNEL_FUNCTION(fn_name) \
  PYTORCH_QNNP_INTERNAL void fn_name(                          \
      size_t m,                                                \
      size_t n,                                                \
      const uint8_t* x,                                        \
      size_t x_stride,                                         \
      const uint8_t* zero,                                     \
      uint8_t* y,                                              \
      const union pytorch_qnnp_avgpool_quantization_params*    \
          quantization_params);
DECLARE_PYTORCH_Q8UPGAVGPOOL_UKERNEL_FUNCTION(pytorch_q8gavgpool_ukernel_up8x7__neon)
DECLARE_PYTORCH_Q8UPGAVGPOOL_UKERNEL_FUNCTION(pytorch_q8gavgpool_ukernel_up8xm__neon)
DECLARE_PYTORCH_Q8UPGAVGPOOL_UKERNEL_FUNCTION(pytorch_q8gavgpool_ukernel_up8x7__sse2)
DECLARE_PYTORCH_Q8UPGAVGPOOL_UKERNEL_FUNCTION(pytorch_q8gavgpool_ukernel_up8xm__sse2)
#ifdef __cplusplus
} /* extern "C" */
#endif
| 2,200
| 39.018182
| 88
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/qnnpack/q8gemm.h
|
/*
 * Copyright (c) Facebook, Inc. and its affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */
#pragma once
#include <stddef.h>
#include <stdint.h>
#include <qnnpack/common.h>
#include <qnnpack/params.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Declares an 8-bit GEMM microkernel; signature matches
 * pytorch_q8gemm_ukernel_function in qnnpack/params.h. */
#define DECLARE_PYTORCH_Q8GEMM_UKERNEL_FUNCTION(fn_name) \
  PYTORCH_QNNP_INTERNAL void fn_name(                    \
      size_t mr,                                         \
      size_t nr,                                         \
      size_t k,                                          \
      const uint8_t* a,                                  \
      size_t a_stride,                                   \
      const void* w,                                     \
      uint8_t* c,                                        \
      size_t c_stride,                                   \
      size_t output_channel_index,                       \
      const union pytorch_qnnp_conv_quantization_params* quantization_params);
DECLARE_PYTORCH_Q8GEMM_UKERNEL_FUNCTION(pytorch_q8gemm_ukernel_3x3c8__neon)
DECLARE_PYTORCH_Q8GEMM_UKERNEL_FUNCTION(pytorch_q8gemm_ukernel_2x4c8__neon)
DECLARE_PYTORCH_Q8GEMM_UKERNEL_FUNCTION(pytorch_q8gemm_ukernel_4x8__neon)
DECLARE_PYTORCH_Q8GEMM_UKERNEL_FUNCTION(pytorch_q8gemm_ukernel_6x4__neon)
DECLARE_PYTORCH_Q8GEMM_UKERNEL_FUNCTION(pytorch_q8gemm_ukernel_8x8__neon)
DECLARE_PYTORCH_Q8GEMM_UKERNEL_FUNCTION(pytorch_q8gemm_ukernel_4x8__aarch32_neon)
DECLARE_PYTORCH_Q8GEMM_UKERNEL_FUNCTION(pytorch_q8gemm_ukernel_8x8__aarch64_neon)
DECLARE_PYTORCH_Q8GEMM_UKERNEL_FUNCTION(pytorch_q8gemm_ukernel_2x4c8__sse2)
DECLARE_PYTORCH_Q8GEMM_UKERNEL_FUNCTION(pytorch_q8gemm_ukernel_4x4c2__sse2)
/* Declares a dynamic-quantization GEMM microkernel (float output). */
#define DECLARE_PYTORCH_Q8GEMM_DYNAMIC_QUANTIZATION_UKERNEL_FUNCTION(fn_name) \
  PYTORCH_QNNP_INTERNAL void fn_name(                                         \
      size_t mr,                                                              \
      size_t nr,                                                              \
      size_t k,                                                               \
      const uint8_t* a,                                                       \
      size_t a_stride,                                                        \
      const void* w,                                                          \
      const float* b,                                                         \
      float* c,                                                               \
      size_t c_stride,                                                        \
      size_t output_channel_index,                                            \
      const struct pytorch_qnnp_conv_dynamic_quantization_params* quantization_params);
DECLARE_PYTORCH_Q8GEMM_DYNAMIC_QUANTIZATION_UKERNEL_FUNCTION(pytorch_q8gemm_dq_ukernel_4x8__neon)
DECLARE_PYTORCH_Q8GEMM_DYNAMIC_QUANTIZATION_UKERNEL_FUNCTION(pytorch_q8gemm_dq_ukernel_4x8__aarch32_neon)
DECLARE_PYTORCH_Q8GEMM_DYNAMIC_QUANTIZATION_UKERNEL_FUNCTION(pytorch_q8gemm_dq_ukernel_8x8__aarch64_neon)
DECLARE_PYTORCH_Q8GEMM_DYNAMIC_QUANTIZATION_UKERNEL_FUNCTION(pytorch_q8gemm_dq_ukernel_4x4c2__sse2)
/* Declares an xzp-variant GEMM microkernel using precomputed row sums and
 * Q31 requantization. */
#define DECLARE_PYTORCH_Q8GEMM_XZP_UKERNEL_FUNCTION(fn_name) \
  PYTORCH_QNNP_INTERNAL void fn_name(                        \
      size_t mr,                                             \
      size_t nr,                                             \
      size_t k,                                              \
      const uint8_t* a,                                      \
      size_t a_stride,                                       \
      const int32_t* a_sum,                                  \
      const void* w,                                         \
      uint8_t* c,                                            \
      size_t c_stride,                                       \
      const union pytorch_qnnp_q31_requantization_params*    \
          requantization_params);
DECLARE_PYTORCH_Q8GEMM_XZP_UKERNEL_FUNCTION(pytorch_q8gemm_xzp_ukernel_4x8c2__neon)
DECLARE_PYTORCH_Q8GEMM_XZP_UKERNEL_FUNCTION(pytorch_q8gemm_xzp_ukernel_4x8c2__aarch32_neon)
/* Multiplier-scaled row sums of an m-by-k uint8 matrix, 4 rows per step. */
PYTORCH_QNNP_INTERNAL void pytorch_q8sumrows_ukernel_4x__neon(
    const uint8_t* a,
    size_t m,
    size_t k,
    size_t stride,
    const int32_t multiplier,
    int32_t* row_sum);
#ifdef __cplusplus
} /* extern "C" */
#endif
| 4,025
| 42.290323
| 105
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/qnnpack/q8gemm_sparse.h
|
/*
 * Copyright (c) Facebook, Inc. and its affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */
#pragma once
#include <stddef.h>
#include <stdint.h>
#include <qnnpack/common.h>
#include <qnnpack/params.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Declares a sparse 8-bit GEMM microkernel with uint8 output; weights are
 * described by row pointers and block column ids (uint32_t indices). */
#define DECLARE_PYTORCH_Q8GEMM_SPARSE_UKERNEL_FUNCTION(fn_name) \
  PYTORCH_QNNP_INTERNAL void fn_name(                           \
      size_t mr,                                                \
      size_t nr,                                                \
      const uint8_t* a,                                         \
      size_t a_stride,                                          \
      const uint8_t* packed_w,                                  \
      const uint32_t* w_row_ptr,                                \
      const uint32_t* w_block_ids_ptr,                          \
      uint8_t* c,                                               \
      size_t c_stride,                                          \
      size_t output_channel_index,                              \
      const union pytorch_qnnp_conv_quantization_params* quantization_params);
DECLARE_PYTORCH_Q8GEMM_SPARSE_UKERNEL_FUNCTION(pytorch_q8gemm_sparse_1x4_ukernel_4x8__neon)
DECLARE_PYTORCH_Q8GEMM_SPARSE_UKERNEL_FUNCTION(pytorch_q8gemm_sparse_1x4_ukernel_8x8__neon)
DECLARE_PYTORCH_Q8GEMM_SPARSE_UKERNEL_FUNCTION(pytorch_q8gemm_sparse_1x4_ukernel_4x8__aarch32_neon)
DECLARE_PYTORCH_Q8GEMM_SPARSE_UKERNEL_FUNCTION(pytorch_q8gemm_sparse_1x4_ukernel_8x8__aarch64_neon)
DECLARE_PYTORCH_Q8GEMM_SPARSE_UKERNEL_FUNCTION(pytorch_q8gemm_sparse_1x4_ukernel_4x4c2__sse2)
/* Same, but with dynamic quantization: float bias and float output. */
#define DECLARE_PYTORCH_Q8GEMM_DYNAMIC_QUANTIZATION_SPARSE_UKERNEL_FUNCTION(fn_name) \
  PYTORCH_QNNP_INTERNAL void fn_name(                                                \
      size_t mr,                                                                     \
      size_t nr,                                                                     \
      const uint8_t* a,                                                              \
      size_t a_stride,                                                               \
      const uint8_t* packed_w,                                                       \
      const uint32_t* w_row_ptr,                                                     \
      const uint32_t* w_block_ids_ptr,                                               \
      const float* b,                                                                \
      float* c,                                                                      \
      size_t c_stride,                                                               \
      size_t output_channel_index,                                                   \
      const struct pytorch_qnnp_conv_dynamic_quantization_params* quantization_params);
DECLARE_PYTORCH_Q8GEMM_DYNAMIC_QUANTIZATION_SPARSE_UKERNEL_FUNCTION(pytorch_q8gemm_dq_sparse_1x4_ukernel_8x4__neon)
DECLARE_PYTORCH_Q8GEMM_DYNAMIC_QUANTIZATION_SPARSE_UKERNEL_FUNCTION(pytorch_q8gemm_dq_sparse_1x4_ukernel_8x4__aarch32_neon)
DECLARE_PYTORCH_Q8GEMM_DYNAMIC_QUANTIZATION_SPARSE_UKERNEL_FUNCTION(pytorch_q8gemm_dq_sparse_1x4_ukernel_8x4__aarch64_neon)
DECLARE_PYTORCH_Q8GEMM_DYNAMIC_QUANTIZATION_SPARSE_UKERNEL_FUNCTION(pytorch_q8gemm_dq_sparse_1x4_ukernel_8x4__sse2)
/* Same again, but with a pre-packed activation matrix; w_index_dtype is the
 * integer type of the row-pointer / block-id arrays. */
#define DECLARE_PYTORCH_Q8GEMM_DYNAMIC_QUANTIZATION_SPARSE_PACKEDA_UKERNEL_FUNCTION( \
    fn_name, w_index_dtype)                                                          \
  PYTORCH_QNNP_INTERNAL void fn_name(                                                \
      size_t mr,                                                                     \
      size_t nr,                                                                     \
      const uint8_t* a_packed,                                                       \
      const uint8_t* packed_w,                                                       \
      const w_index_dtype* w_row_ptr,                                                \
      const w_index_dtype* w_block_ids_ptr,                                          \
      const float* b,                                                                \
      float* c,                                                                      \
      size_t c_stride,                                                               \
      size_t output_channel_index,                                                   \
      const struct pytorch_qnnp_conv_dynamic_quantization_params*                    \
          quantization_params);
// w32, w16, and w8 refer to variants of the kernel which use uint32_t,
// uint16_t, and uint8_t datatype for row values/col indices respectively
DECLARE_PYTORCH_Q8GEMM_DYNAMIC_QUANTIZATION_SPARSE_PACKEDA_UKERNEL_FUNCTION(
    pytorch_q8gemm_dq_sparse_1x4_ukernel_4x8_packedA_w32__aarch32_neon,
    uint32_t)
DECLARE_PYTORCH_Q8GEMM_DYNAMIC_QUANTIZATION_SPARSE_PACKEDA_UKERNEL_FUNCTION(
    pytorch_q8gemm_dq_sparse_1x4_ukernel_4x8_packedA_w16__aarch32_neon,
    uint16_t)
DECLARE_PYTORCH_Q8GEMM_DYNAMIC_QUANTIZATION_SPARSE_PACKEDA_UKERNEL_FUNCTION(
    pytorch_q8gemm_dq_sparse_1x4_ukernel_4x8_packedA_w8__aarch32_neon,
    uint8_t)
DECLARE_PYTORCH_Q8GEMM_DYNAMIC_QUANTIZATION_SPARSE_PACKEDA_UKERNEL_FUNCTION(
    pytorch_q8gemm_dq_sparse_8x1_ukernel_4x8_packedA_w32__aarch32_neon,
    uint32_t)
DECLARE_PYTORCH_Q8GEMM_DYNAMIC_QUANTIZATION_SPARSE_PACKEDA_UKERNEL_FUNCTION(
    pytorch_q8gemm_dq_sparse_8x1_ukernel_4x8_packedA_w16__aarch32_neon,
    uint16_t)
DECLARE_PYTORCH_Q8GEMM_DYNAMIC_QUANTIZATION_SPARSE_PACKEDA_UKERNEL_FUNCTION(
    pytorch_q8gemm_dq_sparse_8x1_ukernel_4x8_packedA_w8__aarch32_neon,
    uint8_t)
DECLARE_PYTORCH_Q8GEMM_DYNAMIC_QUANTIZATION_SPARSE_PACKEDA_UKERNEL_FUNCTION(
    pytorch_q8gemm_dq_sparse_1x4_ukernel_8x4_packedA__aarch32_neon,
    uint32_t)
DECLARE_PYTORCH_Q8GEMM_DYNAMIC_QUANTIZATION_SPARSE_PACKEDA_UKERNEL_FUNCTION(
    pytorch_q8gemm_dq_sparse_1x4_ukernel_8x8_packedA_w32__aarch64_neon,
    uint32_t)
DECLARE_PYTORCH_Q8GEMM_DYNAMIC_QUANTIZATION_SPARSE_PACKEDA_UKERNEL_FUNCTION(
    pytorch_q8gemm_dq_sparse_1x4_ukernel_8x8_packedA_w16__aarch64_neon,
    uint16_t)
DECLARE_PYTORCH_Q8GEMM_DYNAMIC_QUANTIZATION_SPARSE_PACKEDA_UKERNEL_FUNCTION(
    pytorch_q8gemm_dq_sparse_1x4_ukernel_8x8_packedA_w8__aarch64_neon,
    uint8_t)
DECLARE_PYTORCH_Q8GEMM_DYNAMIC_QUANTIZATION_SPARSE_PACKEDA_UKERNEL_FUNCTION(
    pytorch_q8gemm_dq_sparse_8x1_ukernel_8x8_packedA_w32__aarch64_neon,
    uint32_t)
DECLARE_PYTORCH_Q8GEMM_DYNAMIC_QUANTIZATION_SPARSE_PACKEDA_UKERNEL_FUNCTION(
    pytorch_q8gemm_dq_sparse_8x1_ukernel_8x8_packedA_w16__aarch64_neon,
    uint16_t)
DECLARE_PYTORCH_Q8GEMM_DYNAMIC_QUANTIZATION_SPARSE_PACKEDA_UKERNEL_FUNCTION(
    pytorch_q8gemm_dq_sparse_8x1_ukernel_8x8_packedA_w8__aarch64_neon,
    uint8_t)
DECLARE_PYTORCH_Q8GEMM_DYNAMIC_QUANTIZATION_SPARSE_PACKEDA_UKERNEL_FUNCTION(
    pytorch_q8gemm_dq_sparse_1x4_ukernel_8x4_packedA_w32__sse2,
    uint32_t)
DECLARE_PYTORCH_Q8GEMM_DYNAMIC_QUANTIZATION_SPARSE_PACKEDA_UKERNEL_FUNCTION(
    pytorch_q8gemm_dq_sparse_1x4_ukernel_8x4_packedA_w16__sse2,
    uint16_t)
DECLARE_PYTORCH_Q8GEMM_DYNAMIC_QUANTIZATION_SPARSE_PACKEDA_UKERNEL_FUNCTION(
    pytorch_q8gemm_dq_sparse_1x4_ukernel_8x4_packedA_w8__sse2,
    uint8_t)
/* Declares an activation-packing microkernel for the packedA variants.
 * (Note: "PARSE" in the macro name appears to be a historical typo for
 * "SPARSE" — kept for source compatibility.) */
#define DECLARE_PYTORCH_Q8GEMM_PARSE_PACKA_UKERNEL_FUNCTION(fn_name) \
  PYTORCH_QNNP_INTERNAL void fn_name(                                \
      const size_t mr,                                               \
      const size_t K,                                                \
      const uint8_t* a,                                              \
      const size_t a_stride,                                         \
      uint8_t* a_packed);
DECLARE_PYTORCH_Q8GEMM_PARSE_PACKA_UKERNEL_FUNCTION(
    pytorch_q8gemm_sparse_packA_ukernel_4x4__aarch32_neon)
DECLARE_PYTORCH_Q8GEMM_PARSE_PACKA_UKERNEL_FUNCTION(
    pytorch_q8gemm_sparse_packA_ukernel_8x4__aarch32_neon)
DECLARE_PYTORCH_Q8GEMM_PARSE_PACKA_UKERNEL_FUNCTION(
    pytorch_q8gemm_sparse_packA_ukernel_8x4__aarch64_neon)
DECLARE_PYTORCH_Q8GEMM_PARSE_PACKA_UKERNEL_FUNCTION(
    pytorch_q8gemm_sparse_packA_ukernel_8x4__sse2)
#ifdef __cplusplus
} /* extern "C" */
#endif
| 7,636
| 49.576159
| 123
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/qnnpack/q8vadd.h
|
/*
 * Copyright (c) Facebook, Inc. and its affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */
#pragma once
#include <stddef.h>
#include <stdint.h>
#include <qnnpack/common.h>
#include <qnnpack/params.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Declares a quantized element-wise addition microkernel:
 * y = requantize(a + b) over n elements. */
#define DECLARE_PYTORCH_Q8VADD_UKERNEL_FUNCTION(fn_name) \
  PYTORCH_QNNP_INTERNAL void fn_name(                    \
      size_t n,                                          \
      const uint8_t* a,                                  \
      const uint8_t* b,                                  \
      uint8_t* y,                                        \
      const union pytorch_qnnp_add_quantization_params* quantization_params);
DECLARE_PYTORCH_Q8VADD_UKERNEL_FUNCTION(pytorch_q8vadd_ukernel__neon)
DECLARE_PYTORCH_Q8VADD_UKERNEL_FUNCTION(pytorch_q8vadd_ukernel__sse2)
#ifdef __cplusplus
} /* extern "C" */
#endif
| 950
| 26.171429
| 77
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/qnnpack/requantization-stubs.h
|
/*
 * Copyright (c) Facebook, Inc. and its affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */
#pragma once
#include <stddef.h>
#include <stdint.h>
#include <qnnpack/params.h>
#include <pthreadpool.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Requantizes n int32 values to uint8 with the given scale, zero point, and
 * [qmin, qmax] clamping range. */
typedef void (*pytorch_requantization_function)(
    size_t n,
    const int32_t* input,
    float scale,
    uint8_t zero_point,
    uint8_t qmin,
    uint8_t qmax,
    uint8_t* output);
#define DECLARE_PYTORCH_REQUANTIZATION_FUNCTION(fn_name) \
  void fn_name(                                          \
      size_t n,                                          \
      const int32_t* input,                              \
      float scale,                                       \
      uint8_t zero_point,                                \
      uint8_t qmin,                                      \
      uint8_t qmax,                                      \
      uint8_t* output);
/* Variants by rounding scheme (precise / fp32 / q31 / gemmlowp) and ISA. */
DECLARE_PYTORCH_REQUANTIZATION_FUNCTION(
    pytorch_qnnp_requantize_precise__scalar_unsigned32)
DECLARE_PYTORCH_REQUANTIZATION_FUNCTION(
    pytorch_qnnp_requantize_precise__scalar_unsigned64)
DECLARE_PYTORCH_REQUANTIZATION_FUNCTION(
    pytorch_qnnp_requantize_precise__scalar_signed64)
DECLARE_PYTORCH_REQUANTIZATION_FUNCTION(pytorch_qnnp_requantize_precise__sse2)
DECLARE_PYTORCH_REQUANTIZATION_FUNCTION(pytorch_qnnp_requantize_precise__ssse3)
DECLARE_PYTORCH_REQUANTIZATION_FUNCTION(pytorch_qnnp_requantize_precise__sse4)
DECLARE_PYTORCH_REQUANTIZATION_FUNCTION(pytorch_qnnp_requantize_precise__neon)
DECLARE_PYTORCH_REQUANTIZATION_FUNCTION(pytorch_qnnp_requantize_precise__psimd)
DECLARE_PYTORCH_REQUANTIZATION_FUNCTION(pytorch_qnnp_requantize_fp32__scalar_lrintf)
DECLARE_PYTORCH_REQUANTIZATION_FUNCTION(pytorch_qnnp_requantize_fp32__scalar_magic)
DECLARE_PYTORCH_REQUANTIZATION_FUNCTION(pytorch_qnnp_requantize_fp32__sse2)
DECLARE_PYTORCH_REQUANTIZATION_FUNCTION(pytorch_qnnp_requantize_fp32__neon)
DECLARE_PYTORCH_REQUANTIZATION_FUNCTION(pytorch_qnnp_requantize_fp32__psimd)
DECLARE_PYTORCH_REQUANTIZATION_FUNCTION(pytorch_qnnp_requantize_q31__scalar)
DECLARE_PYTORCH_REQUANTIZATION_FUNCTION(pytorch_qnnp_requantize_q31__sse2)
DECLARE_PYTORCH_REQUANTIZATION_FUNCTION(pytorch_qnnp_requantize_q31__ssse3)
DECLARE_PYTORCH_REQUANTIZATION_FUNCTION(pytorch_qnnp_requantize_q31__sse4)
DECLARE_PYTORCH_REQUANTIZATION_FUNCTION(pytorch_qnnp_requantize_q31__neon)
DECLARE_PYTORCH_REQUANTIZATION_FUNCTION(pytorch_qnnp_requantize_q31__psimd)
DECLARE_PYTORCH_REQUANTIZATION_FUNCTION(pytorch_qnnp_requantize_gemmlowp__scalar)
DECLARE_PYTORCH_REQUANTIZATION_FUNCTION(pytorch_qnnp_requantize_gemmlowp__sse2)
DECLARE_PYTORCH_REQUANTIZATION_FUNCTION(pytorch_qnnp_requantize_gemmlowp__ssse3)
DECLARE_PYTORCH_REQUANTIZATION_FUNCTION(pytorch_qnnp_requantize_gemmlowp__sse4)
DECLARE_PYTORCH_REQUANTIZATION_FUNCTION(pytorch_qnnp_requantize_gemmlowp__neon)
#ifdef __cplusplus
} /* extern "C" */
#endif
| 2,989
| 38.866667
| 84
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/qnnpack/requantization.h
|
/*
 * Copyright (c) Facebook, Inc. and its affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */
#pragma once
#include <assert.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <fp16/bitcasts.h>
#include <qnnpack/params.h>
#include <qnnpack/scalar-utils.h>
/*
 * Builds scalar Q31 requantization parameters from a floating-point scale in
 * [2^-32, 1): the scale is decomposed into a Q31 fixed-point multiplier in
 * [0x40000000, 0x7FFFFF80] and a right-shift amount in [0, 31].
 * min/max clamp the quantized output; zero_point biases it.
 */
static inline union pytorch_qnnp_q31_requantization_params
pytorch_qnnp_compute_scalar_requantization_params(
    float scale,
    uint8_t zero_point,
    uint8_t min,
    uint8_t max) {
  /* Compute requantization parameters */
  assert(scale < 1.0f);
  assert(scale >= 0x1.0p-32f);
  const uint32_t scale_bits = fp32_to_bits(scale);
  /* Multiplier is in [0x40000000, 0x7FFFFF80] range */
  /* Mantissa with the implicit leading 1 restored, shifted up into Q31. */
  const int32_t multiplier = (int32_t)(
      ((scale_bits & UINT32_C(0x007FFFFF)) | UINT32_C(0x00800000)) << 7);
  assert(multiplier >= INT32_C(0x40000000));
  assert(multiplier <= INT32_C(0x7FFFFF80));
  /* Shift is in [0, 31] range; derived from the float exponent. */
  const int32_t shift = 127 + 31 - 32 - (fp32_to_bits(scale) >> 23);
  assert(shift >= 0);
  assert(shift < 32);
  union pytorch_qnnp_q31_requantization_params params;
  /* Mask/threshold implement round-to-nearest in the final right shift. */
  const uint32_t remainder_mask = (UINT32_C(1) << shift) - UINT32_C(1);
  const uint32_t remainder_threshold = remainder_mask >> 1;
  params.scalar.multiplier = multiplier;
  params.scalar.remainder_mask = (int32_t)remainder_mask;
  params.scalar.remainder_threshold = (int32_t)remainder_threshold;
  params.scalar.shift = (uint32_t)shift;
  /* Clamp bounds are pre-biased by the zero point so kernels can clamp
     before adding the zero point. */
  params.scalar.min_less_zero_point =
      (int32_t)(uint32_t)min - (int32_t)(uint32_t)zero_point;
  params.scalar.max_less_zero_point =
      (int32_t)(uint32_t)max - (int32_t)(uint32_t)zero_point;
  params.scalar.zero_point = (int32_t)(uint32_t)zero_point;
  return params;
}
/*
 * Builds scalar fp32 (floating-point) requantization parameters.
 * scales is a per-output-channel array of requantization scales; the pointer
 * is borrowed, not copied — the caller must keep it alive.
 * min/max/zero_point describe the quantized uint8 output.
 */
static inline union pytorch_qnnp_fp32_requantization_params
pytorch_qnnp_compute_scalar_fp32_requantization_params(
    float* scales,
    uint8_t zero_point,
    uint8_t min,
    uint8_t max) {
  union pytorch_qnnp_fp32_requantization_params params;
  params.scalar.scales = scales;
  params.scalar.output_zero_point = zero_point;
  params.scalar.output_max = max;
  params.scalar.output_min = min;
  /* Pre-biased float clamp bounds used by the magic-rounding path. */
  params.scalar.min_less_zero_point = ((float)((int32_t)(uint32_t)min -
      (int32_t)(uint32_t)zero_point));
  params.scalar.max_less_zero_point = ((float)((int32_t)(uint32_t)max -
      (int32_t)(uint32_t)zero_point));
  /* 12582912.0f = 2^23 + 2^22: adding it to a small float pushes the rounded
     integer into the low mantissa bits (the "magic number" rounding trick). */
  params.scalar.magic = 12582912.0f;
  /* 0x4B400000 is the bit pattern of the magic constant; subtracting the
     zero point here folds the output bias into the same subtraction. */
  params.scalar.magic_less_zero_point = (INT32_C(0x4B400000) -
      (int32_t)(uint32_t)zero_point);
  return params;
}
/*
 * Builds Q31 requantization parameters in the layout expected by the
 * architecture selected at compile time (SSE2, NEON, or scalar fallback).
 * scale is decomposed into a Q31 multiplier plus a right shift, exactly as
 * in pytorch_qnnp_compute_scalar_requantization_params.
 */
static inline union pytorch_qnnp_q31_requantization_params
pytorch_qnnp_compute_requantization_params(
    float scale,
    uint8_t zero_point,
    uint8_t min,
    uint8_t max) {
  /* Compute requantization parameters */
  const uint32_t scale_bits = fp32_to_bits(scale);
  /* Multiplier is in [0x40000000, 0x7FFFFF80] range */
  const int32_t multiplier = (int32_t)(
      ((scale_bits & UINT32_C(0x007FFFFF)) | UINT32_C(0x00800000)) << 7);
  assert(multiplier >= INT32_C(0x40000000));
  assert(multiplier <= INT32_C(0x7FFFFF80));
  /* Shift is in [0, 31] range */
  const int32_t shift = 127 + 31 - 32 - (fp32_to_bits(scale) >> 23);
  assert(shift >= 0);
  assert(shift < 32);
  union pytorch_qnnp_q31_requantization_params params;
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
  /* SSE2 layout: each field is replicated to fill a 128-bit vector lane-wise. */
  const uint32_t remainder_mask = (UINT32_C(1) << shift) - UINT32_C(1);
  const uint32_t remainder_threshold = remainder_mask >> 1;
  params.sse2.multiplier[0] = multiplier;
  params.sse2.multiplier[1] = multiplier;
  params.sse2.multiplier[2] = multiplier;
  params.sse2.multiplier[3] = multiplier;
  /* Q31 rounding constant (2^30) replicated per 64-bit lane. */
  params.sse2.rounding[0] = UINT64_C(0x40000000);
  params.sse2.rounding[1] = UINT64_C(0x40000000);
  params.sse2.remainder_mask[0] = (int32_t)remainder_mask;
  params.sse2.remainder_mask[1] = (int32_t)remainder_mask;
  params.sse2.remainder_mask[2] = (int32_t)remainder_mask;
  params.sse2.remainder_mask[3] = (int32_t)remainder_mask;
  params.sse2.remainder_threshold[0] = (int32_t)remainder_threshold;
  params.sse2.remainder_threshold[1] = (int32_t)remainder_threshold;
  params.sse2.remainder_threshold[2] = (int32_t)remainder_threshold;
  params.sse2.remainder_threshold[3] = (int32_t)remainder_threshold;
  params.sse2.shift[0] = (uint64_t)(uint32_t)shift;
  params.sse2.shift[1] = (uint64_t)(uint32_t)shift;
  for (uint32_t i = 0; i < 8; i++) {
    params.sse2.zero_point[i] = (int16_t)(uint16_t)zero_point;
  }
  for (uint32_t i = 0; i < 16; i++) {
    params.sse2.max[i] = max;
    params.sse2.min[i] = min;
  }
#elif CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
  params.neon.multiplier = multiplier;
  /* NEON shift instructions take negative amounts for right shifts. */
  params.neon.right_shift = -shift;
  params.neon.zero_point = (int16_t)(uint16_t)zero_point;
  params.neon.max = max;
  params.neon.min = min;
#else
  /* Portable scalar fallback — mirrors the scalar variant above. */
  const uint32_t remainder_mask = (UINT32_C(1) << shift) - UINT32_C(1);
  const uint32_t remainder_threshold = remainder_mask >> 1;
  params.scalar.multiplier = multiplier;
  params.scalar.remainder_mask = (int32_t)remainder_mask;
  params.scalar.remainder_threshold = (int32_t)remainder_threshold;
  params.scalar.shift = (uint32_t)shift;
  params.scalar.min_less_zero_point =
      (int32_t)(uint32_t)min - (int32_t)(uint32_t)zero_point;
  params.scalar.max_less_zero_point =
      (int32_t)(uint32_t)max - (int32_t)(uint32_t)zero_point;
  params.scalar.zero_point = (int32_t)(uint32_t)zero_point;
#endif
  return params;
}
/*
 * Builds per-architecture quantization parameters for quantized convolution.
 * kernel_zero_points and requantization_scales are per-output-channel arrays;
 * both pointers are borrowed (not copied) and must outlive the params.
 */
static inline union pytorch_qnnp_conv_quantization_params
pytorch_qnnp_compute_conv_quantization_params(
    uint8_t input_zero_point,
    const uint8_t* kernel_zero_points,
    const float* requantization_scales,
    uint8_t output_zero_point,
    uint8_t output_min,
    uint8_t output_max) {
  union pytorch_qnnp_conv_quantization_params params;
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
  params.sse2.kernel_zero_points = kernel_zero_points;
  /* Replicate scalar zero point across all eight 16-bit vector lanes. */
  for (uint32_t i = 0; i < 8; i++) {
    params.sse2.input_zero_point[i] = (int16_t)(uint16_t)input_zero_point;
  }
  params.sse2.requantization_scales = requantization_scales;
  for (uint32_t i = 0; i < 8; i++) {
    params.sse2.output_zero_point[i] = (int16_t)(uint16_t)output_zero_point;
  }
  /* Clamp bounds replicated across all sixteen 8-bit lanes. */
  for (uint32_t i = 0; i < 16; i++) {
    params.sse2.output_max[i] = output_max;
    params.sse2.output_min[i] = output_min;
  }
#elif CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
  params.neon.input_zero_point = (int16_t)(uint16_t)input_zero_point;
  params.neon.kernel_zero_points = kernel_zero_points;
  params.neon.requantization_scales = requantization_scales;
  params.neon.output_zero_point = (int16_t)(uint16_t)output_zero_point;
  params.neon.output_max = output_max;
  params.neon.output_min = output_min;
  /* Pre-biased float clamp bounds and magic-rounding constants for the
     float-based NEON requantization path. */
  params.neon.vfmin = ((float)((int32_t)(uint32_t)output_min -
      (int32_t)(uint32_t)output_zero_point));
  params.neon.vfmax = ((float)((int32_t)(uint32_t)output_max -
      (int32_t)(uint32_t)output_zero_point));
  params.neon.vfmagic = 12582912.0f; /* 2^23 + 2^22 */
  params.neon.vimagic = (INT32_C(0x4B400000) -
      (int32_t)(uint32_t)output_zero_point);
#else
  params.scalar.input_zero_point = (int32_t)(uint32_t)input_zero_point;
  params.scalar.kernel_zero_points = kernel_zero_points;
  params.scalar.requantization_scales = requantization_scales;
  params.scalar.output_min_less_zero_point =
      (int32_t)(uint32_t)output_min - (int32_t)(uint32_t)output_zero_point;
  params.scalar.output_max_less_zero_point =
      (int32_t)(uint32_t)output_max - (int32_t)(uint32_t)output_zero_point;
  params.scalar.output_zero_point = (int32_t)(uint32_t)output_zero_point;
#endif
  return params;
}
/*
 * Builds per-architecture quantization parameters for quantized average
 * pooling: an int32 bias added to the accumulator, a float scale in
 * [2^-32, 256), and output zero point / clamp bounds.
 */
static inline union pytorch_qnnp_avgpool_quantization_params
pytorch_qnnp_compute_avgpool_quantization_params(
    int32_t bias,
    float scale,
    uint8_t output_zero_point,
    uint8_t output_min,
    uint8_t output_max) {
  /* Compute requantization parameters */
  assert(scale >= 0x1.0p-32f);
  assert(scale < 256.0f);
  union pytorch_qnnp_avgpool_quantization_params params;
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
  /* Bias and scale replicated across the four 32-bit vector lanes. */
  params.sse2.bias[0] = bias;
  params.sse2.bias[1] = bias;
  params.sse2.bias[2] = bias;
  params.sse2.bias[3] = bias;
  params.sse2.scale[0] = scale;
  params.sse2.scale[1] = scale;
  params.sse2.scale[2] = scale;
  params.sse2.scale[3] = scale;
  for (uint32_t i = 0; i < 8; i++) {
    params.sse2.output_zero_point[i] = (int16_t)(uint16_t)output_zero_point;
  }
  for (uint32_t i = 0; i < 16; i++) {
    params.sse2.output_max[i] = output_max;
    params.sse2.output_min[i] = output_min;
  }
#elif CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
  params.neon.bias = bias;
  params.neon.scale = scale;
  params.neon.output_zero_point = (int16_t)(uint16_t)output_zero_point;
  params.neon.output_max = output_max;
  params.neon.output_min = output_min;
  /* Pre-biased float clamp bounds and magic-rounding constants for the
     ARMv7 (non-AArch64) float rounding path. */
  params.neon.vfmin = ((float)((int32_t)(uint32_t)output_min -
      (int32_t)(uint32_t)output_zero_point));
  params.neon.vfmax = ((float)((int32_t)(uint32_t)output_max -
      (int32_t)(uint32_t)output_zero_point));
  params.neon.vfmagic = 12582912.0f; /* 2^23 + 2^22 */
  params.neon.vimagic = (INT32_C(0x4B400000) -
      (int32_t)(uint32_t)output_zero_point);
#else
  params.scalar.bias = bias;
  params.scalar.scale = scale;
  params.scalar.output_zero_point = (int32_t)(uint32_t)output_zero_point;
  params.scalar.output_max = (int32_t)(uint32_t)output_max;
  params.scalar.output_min = (int32_t)(uint32_t)output_min;
#endif
  return params;
}
/*
 * Scalar-only variant of pytorch_qnnp_compute_avgpool_quantization_params.
 * Always fills the .scalar layout regardless of the target architecture
 * (used e.g. by reference/test code paths).
 */
static inline union pytorch_qnnp_avgpool_quantization_params
pytorch_qnnp_compute_scalar_avgpool_quantization_params(
    int32_t bias,
    float scale,
    uint8_t output_zero_point,
    uint8_t output_min,
    uint8_t output_max) {
  /* Compute requantization parameters */
  assert(scale >= 0x1.0p-32f);
  assert(scale < 256.0f);
  union pytorch_qnnp_avgpool_quantization_params params;
  params.scalar.bias = bias;
  params.scalar.scale = scale;
  params.scalar.output_zero_point = (int32_t)(uint32_t)output_zero_point;
  params.scalar.output_max = (int32_t)(uint32_t)output_max;
  params.scalar.output_min = (int32_t)(uint32_t)output_min;
  return params;
}
/*
 * Builds per-architecture parameters for uint8 clamping kernels.
 * Requires output_min <= output_max.
 */
static inline union pytorch_qnnp_u8_clamping_params
pytorch_qnnp_compute_u8_clamping_params(
    uint8_t output_min,
    uint8_t output_max) {
  assert(output_min <= output_max);
  union pytorch_qnnp_u8_clamping_params params;
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
  /* Bounds replicated across all sixteen 8-bit vector lanes. */
  for (uint32_t i = 0; i < 16; i++) {
    params.sse2.output_max[i] = output_max;
    params.sse2.output_min[i] = output_min;
  }
#elif CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
  params.neon.output_max = output_max;
  params.neon.output_min = output_min;
#else
  params.scalar.output_min = (int32_t)(uint32_t)output_min;
  params.scalar.output_max = (int32_t)(uint32_t)output_max;
#endif
  return params;
}
/*
 * Builds quantization parameters for elementwise quantized addition:
 * y = clamp(round(((a - a_zp) * a_scale + (b - b_zp) * b_scale)) + y_zp).
 * Both input-to-output scales must lie in [2^-14, 2^8); they are converted
 * to fixed-point multipliers in [0, 2^22) that share a single right shift
 * in [13, 31] derived from the larger of the two scales.
 */
static inline union pytorch_qnnp_add_quantization_params
pytorch_qnnp_compute_add_quantization_params(
    uint8_t a_zero_point,
    uint8_t b_zero_point,
    uint8_t output_zero_point,
    float a_output_scale,
    float b_output_scale,
    uint8_t output_min,
    uint8_t output_max) {
  assert(a_output_scale >= 0x1.0p-14f);
  assert(b_output_scale >= 0x1.0p-14f);
  assert(a_output_scale < 0x1.0p+8f);
  assert(b_output_scale < 0x1.0p+8f);
  /* Compute requantization parameters */
  const float max_output_scale =
      a_output_scale > b_output_scale ? a_output_scale : b_output_scale;
  assert(max_output_scale >= 0x1.0p-14f);
  assert(max_output_scale < 0x1.0p+8f);
  const uint32_t max_scale_bits = fp32_to_bits(max_output_scale);
  const int32_t max_scale_exponent = (int32_t)(max_scale_bits >> 23) - 127;
  /* Shift is in [13, 31] range */
  const uint32_t shift = (uint32_t)(21 - max_scale_exponent);
  assert(shift < 32);
  assert(shift >= 13);
  /* scale_multiplier == 2^shift, built directly from exponent bits. */
  const float scale_multiplier =
      fp32_from_bits((uint32_t)(21 - max_scale_exponent + 127) << 23);
  /* Multipliers are in [0, 2**22) range, largest multiplier is in [2**21,
   * 2**22) range */
  const uint32_t a_multiplier =
      (uint32_t)(int32_t)lrintf(a_output_scale * scale_multiplier);
  const uint32_t b_multiplier =
      (uint32_t)(int32_t)lrintf(b_output_scale * scale_multiplier);
  assert(
      (a_multiplier > b_multiplier ? a_multiplier : b_multiplier) >=
      UINT32_C(0x00200000));
  assert(a_multiplier < UINT32_C(0x00400000));
  assert(b_multiplier < UINT32_C(0x00400000));
  union pytorch_qnnp_add_quantization_params params;
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
  const uint32_t remainder_mask = (UINT32_C(1) << shift) - UINT32_C(1);
  const uint32_t remainder_threshold = remainder_mask >> 1;
  /* Constant term folding both input zero points into the accumulator. */
  const int32_t zero_point_product = (int32_t) -
      (a_multiplier * (uint32_t)a_zero_point +
       b_multiplier * (uint32_t)b_zero_point);
  for (uint32_t i = 0; i < 4; i++) {
    params.sse2.zero_point_product[i] = zero_point_product;
  }
  for (uint32_t i = 0; i < 8; i++) {
    params.sse2.y_zero_point[i] = (int16_t)(uint16_t)output_zero_point;
  }
  /* Multipliers split into 16-bit halves for SSE2 16-bit multiply lanes. */
  for (uint32_t i = 0; i < 8; i++) {
    params.sse2.a_multiplier_lo[i] = (uint16_t)(uint32_t)a_multiplier;
    params.sse2.a_multiplier_hi[i] = (uint16_t)((uint32_t)a_multiplier >> 16);
    params.sse2.b_multiplier_lo[i] = (uint16_t)(uint32_t)b_multiplier;
    params.sse2.b_multiplier_hi[i] = (uint16_t)((uint32_t)b_multiplier >> 16);
  }
  params.sse2.a_multiplier = a_multiplier;
  params.sse2.b_multiplier = b_multiplier;
  for (uint32_t i = 0; i < 4; i++) {
    params.sse2.remainder_mask[i] = remainder_mask;
    params.sse2.remainder_threshold[i] = remainder_threshold;
  }
  params.sse2.shift = shift;
  for (uint32_t i = 0; i < 16; i++) {
    params.sse2.y_max[i] = output_max;
    params.sse2.y_min[i] = output_min;
  }
#elif CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
  params.neon.a_zero_point = a_zero_point;
  params.neon.b_zero_point = b_zero_point;
  params.neon.y_zero_point = (int16_t)(uint16_t)output_zero_point;
  params.neon.a_multiplier = (int32_t)a_multiplier;
  params.neon.b_multiplier = (int32_t)b_multiplier;
  /* NEON encodes right shifts as negative shift amounts. */
  params.neon.right_shift = (int32_t)-shift;
  params.neon.y_max = output_max;
  params.neon.y_min = output_min;
#else
  const uint32_t remainder_mask = (UINT32_C(1) << shift) - UINT32_C(1);
  const uint32_t remainder_threshold = remainder_mask >> 1;
  params.scalar.zero_point_product = (int32_t) -
      (a_multiplier * (uint32_t)a_zero_point +
       b_multiplier * (uint32_t)b_zero_point);
  params.scalar.a_multiplier = a_multiplier;
  params.scalar.b_multiplier = b_multiplier;
  params.scalar.remainder_mask = (int32_t)remainder_mask;
  params.scalar.remainder_threshold = (int32_t)remainder_threshold;
  params.scalar.shift = shift;
  params.scalar.y_zero_point = (int32_t)(uint32_t)output_zero_point;
  params.scalar.y_max = (int32_t)(uint32_t)output_max;
  params.scalar.y_min = (int32_t)(uint32_t)output_min;
#endif
  return params;
}
/*
 * Scalar-only variant of pytorch_qnnp_compute_add_quantization_params.
 * Note the looser scale precondition (>= 2^-10 rather than 2^-14) and the
 * multiplier computation: instead of multiplying by 2^shift, it adds
 * (shift << 23) directly to the float's exponent bits — equivalent for
 * normal floats in range.
 */
static inline union pytorch_qnnp_add_quantization_params
pytorch_qnnp_compute_scalar_add_quantization_params(
    uint8_t a_zero_point,
    uint8_t b_zero_point,
    uint8_t output_zero_point,
    float a_output_scale,
    float b_output_scale,
    uint8_t output_min,
    uint8_t output_max) {
  assert(a_output_scale >= 0x1.0p-10f);
  assert(b_output_scale >= 0x1.0p-10f);
  assert(a_output_scale < 0x1.0p+8f);
  assert(b_output_scale < 0x1.0p+8f);
  /* Compute requantization parameters */
  const float max_output_scale =
      a_output_scale > b_output_scale ? a_output_scale : b_output_scale;
  assert(max_output_scale >= 0x1.0p-10f);
  assert(max_output_scale < 0x1.0p+8f);
  const uint32_t max_scale_bits = fp32_to_bits(max_output_scale);
  const int32_t max_scale_exponent = (int32_t)(max_scale_bits >> 23) - 127;
  /* Shift is in [13, 31] range */
  const uint32_t shift = (uint32_t)(21 - max_scale_exponent);
  assert(shift < 32);
  assert(shift >= 13);
  /* Multipliers are in [0, 2**22) range, largest multiplier is in [2**21,
   * 2**22) range */
  /* Scale-by-2^shift via exponent-bit addition, then round to nearest. */
  const uint32_t a_multiplier = (uint32_t)(int32_t)lrintf(
      fp32_from_bits(fp32_to_bits(a_output_scale) + (shift << 23)));
  const uint32_t b_multiplier = (uint32_t)(int32_t)lrintf(
      fp32_from_bits(fp32_to_bits(b_output_scale) + (shift << 23)));
  assert(
      (a_multiplier > b_multiplier ? a_multiplier : b_multiplier) >=
      UINT32_C(0x00200000));
  assert(a_multiplier < UINT32_C(0x00400000));
  assert(b_multiplier < UINT32_C(0x00400000));
  union pytorch_qnnp_add_quantization_params params;
  /* Mask/threshold implement round-to-nearest in the final right shift. */
  const uint32_t remainder_mask = (UINT32_C(1) << shift) - UINT32_C(1);
  const uint32_t remainder_threshold = remainder_mask >> 1;
  /* Constant term folding both input zero points into the accumulator. */
  params.scalar.zero_point_product = (int32_t) -
      (a_multiplier * (uint32_t)a_zero_point +
       b_multiplier * (uint32_t)b_zero_point);
  params.scalar.a_multiplier = a_multiplier;
  params.scalar.b_multiplier = b_multiplier;
  params.scalar.remainder_mask = (int32_t)remainder_mask;
  params.scalar.remainder_threshold = (int32_t)remainder_threshold;
  params.scalar.shift = shift;
  params.scalar.y_zero_point = (int32_t)(uint32_t)output_zero_point;
  params.scalar.y_max = (int32_t)(uint32_t)output_max;
  params.scalar.y_min = (int32_t)(uint32_t)output_min;
  return params;
}
/*
 * Requantizes one int32 accumulator to uint8 using Q31 fixed-point math:
 * multiply by the Q31 multiplier with rounding, arithmetic-shift right with
 * a remainder correction that rounds to nearest, clamp to the pre-biased
 * [min, max] range, then add the output zero point.
 */
static inline uint8_t pytorch_qnnp_q31_requantize(
    int32_t n,
    union pytorch_qnnp_q31_requantization_params params) {
  /* Full 64-bit product, rounded (add 2^30) down to a Q31 result. */
  const int64_t product = (int64_t)n * (int64_t)params.scalar.multiplier;
  const int32_t q31product =
      (int32_t)(uint32_t)((uint64_t)(product + INT64_C(0x40000000)) >> 31);
  /* The remainder correction makes the arithmetic shift round to nearest;
     the (n < 0) term adjusts the tie behavior for negative inputs. */
  const int32_t remainder =
      (q31product & params.scalar.remainder_mask) - (int32_t)(n < 0);
  n = asr_s32(q31product, params.scalar.shift) +
      (int32_t)(remainder > params.scalar.remainder_threshold);
  if (n < params.scalar.min_less_zero_point) {
    n = params.scalar.min_less_zero_point;
  }
  if (n > params.scalar.max_less_zero_point) {
    n = params.scalar.max_less_zero_point;
  }
  return (uint8_t)(n + params.scalar.zero_point);
}
/*
 * Requantizes n with a per-channel float scale: scale, round with lrintf
 * (round-to-nearest-even in the default rounding mode), clamp to the output
 * range expressed relative to the zero point, then add the zero point.
 */
static inline uint8_t pytorch_qnnp_fp32_requantize(
    int32_t n,
    union pytorch_qnnp_fp32_requantization_params params,
    int32_t output_channel_index) {
  /* Clamp bounds shifted by the zero point so clamping happens pre-bias. */
  const long lmin =
      (long)((int32_t)(uint32_t)params.scalar.output_min -
             (int32_t)(uint32_t)params.scalar.output_zero_point);
  const long lmax =
      (long)((int32_t)(uint32_t)params.scalar.output_max -
             (int32_t)(uint32_t)params.scalar.output_zero_point);
  const float n_scaled = (float)n * params.scalar.scales[output_channel_index];
  const long n_rounded = lrintf(n_scaled);
  const int32_t n_clamped = (int32_t)(
      n_rounded < lmin ? lmin : n_rounded > lmax ? lmax : n_rounded);
  const int32_t n_biased =
      n_clamped + (int32_t)(uint32_t)params.scalar.output_zero_point;
  return (uint8_t)n_biased;
}
/*
 * Same contract as pytorch_qnnp_fp32_requantize, but rounds by adding the
 * "magic" constant 2^23 + 2^22 so the rounded integer lands in the float's
 * low mantissa bits; subtracting magic_less_zero_point then yields the
 * zero-point-biased result without an explicit float-to-int conversion.
 */
static inline uint8_t pytorch_qnnp_fp32_requantize_magic(
    int32_t n,
    union pytorch_qnnp_fp32_requantization_params params,
    int32_t output_channel_index) {
  const float fmin = params.scalar.min_less_zero_point;
  const float fmax = params.scalar.max_less_zero_point;
  const float fmagic = params.scalar.magic;
  const int32_t imagic = params.scalar.magic_less_zero_point;
  const float n_scaled = (float)n * params.scalar.scales[output_channel_index];
  /* Clamp in float space before the magic-add so the trick stays in range. */
  const float n_clamped =
      n_scaled < fmin ? fmin : n_scaled > fmax ? fmax : n_scaled;
  const int32_t n_biased = (int32_t)fp32_to_bits(n_clamped + fmagic) - imagic;
  return (uint8_t)n_biased;
}
/*
 * Quantizes an average-pooling accumulator: scale by the float factor,
 * round with lrintf, add the output zero point, then clamp to
 * [output_min, output_max].
 */
static inline uint8_t pytorch_qnnp_avgpool_quantize(
    int32_t n,
    union pytorch_qnnp_avgpool_quantization_params params) {
  const float scaled_n = ((float)n)*params.scalar.scale;
  int32_t n_rounded = (int32_t)lrintf(scaled_n) + params.scalar.output_zero_point;
  const int32_t lmin =
      (int32_t)(uint32_t)params.scalar.output_min;
  const int32_t lmax =
      (int32_t)(uint32_t)params.scalar.output_max;
  n_rounded = (
      n_rounded < lmin ? lmin : n_rounded > lmax ? lmax : n_rounded);
  return (uint8_t)n_rounded;
}
/*
 * Scalar reference for quantized elementwise addition. Accumulates the two
 * fixed-point products plus the precomputed zero-point product, right-shifts
 * with round-to-nearest (via the remainder correction), adds the output
 * zero point, and clamps to [y_min, y_max].
 */
static inline uint8_t pytorch_qnnp_add_quantize(
    uint8_t a,
    uint8_t b,
    union pytorch_qnnp_add_quantization_params params) {
  /* Multiply by factors and accumulate products */
  int32_t acc = params.scalar.zero_point_product +
      (int32_t)((uint32_t)a * params.scalar.a_multiplier) +
      (int32_t)((uint32_t)b * params.scalar.b_multiplier);
  /* Shift right and round */
  const int32_t rem = (acc & params.scalar.remainder_mask) - (int32_t)(acc < 0);
  acc = asr_s32(acc, params.scalar.shift) +
      (int32_t)(rem > params.scalar.remainder_threshold);
  /* Clamp and add output zero point */
  int32_t y = acc + params.scalar.y_zero_point;
  if (y >= params.scalar.y_max) {
    y = params.scalar.y_max;
  }
  if (y <= params.scalar.y_min) {
    y = params.scalar.y_min;
  }
  return (uint8_t)y;
}
| 20,682
| 36.8117
| 82
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/qnnpack/scalar-utils.h
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <assert.h>
#include <limits.h>
#include <stdbool.h>
#include <stdint.h>
#include <fp16/bitcasts.h>
#if defined(__clang__)
#if __clang_major__ == 3 && __clang_minor__ >= 7 || __clang_major__ > 3
#define PYTORCH_QNNP_IGNORE_SHIFT_BASE_UB \
__attribute__((__no_sanitize__("shift-base")))
#else
#define PYTORCH_QNNP_IGNORE_SHIFT_BASE_UB
#endif
#elif defined(__GNUC__)
#if __GNUC__ >= 8
#define PYTORCH_QNNP_IGNORE_SHIFT_BASE_UB \
__attribute__((__no_sanitize__("shift-base")))
#elif __GNUC__ == 4 && __GNUC_MINOR__ >= 9 || __GNUC__ > 4
/* 4.9 <= gcc < 8 support ubsan, but doesn't support no_sanitize attribute */
#define PYTORCH_QNNP_IGNORE_SHIFT_BASE_UB
#ifndef PYTORCH_QNNP_USE_SHIFT_BASE_UB_WORKAROUND
#define PYTORCH_QNNP_USE_SHIFT_BASE_UB_WORKAROUND 1
#endif
#else
#define PYTORCH_QNNP_IGNORE_SHIFT_BASE_UB
#endif
#else
#define PYTORCH_QNNP_IGNORE_SHIFT_BASE_UB
#endif
PYTORCH_QNNP_IGNORE_SHIFT_BASE_UB
/*
 * Arithmetic (sign-preserving) right shift of a signed 32-bit value.
 * On toolchains where sanitizers flag `>>` on negative operands but the
 * no_sanitize attribute is unavailable, an equivalent UB-free formulation
 * is used instead.
 */
inline static int32_t asr_s32(int32_t x, uint32_t n) {
#ifdef PYTORCH_QNNP_USE_SHIFT_BASE_UB_WORKAROUND
#if defined(__x86_64__) || defined(__aarch64__)
  /* Sign-extend to 64 bits, then a plain unsigned shift is well-defined. */
  return (int32_t)((uint64_t)(int64_t)x >> n);
#else
  /* Negative case: shift the complement, then complement back. */
  if (x >= 0) {
    return x >> n;
  }
  return ~(~x >> n);
#endif
#else
  return x >> n;
#endif
}
PYTORCH_QNNP_IGNORE_SHIFT_BASE_UB
/*
 * Arithmetic (sign-preserving) right shift of a signed 64-bit value.
 * Mirrors asr_s32; the workaround branch avoids shifting a negative
 * operand directly.
 */
inline static int64_t asr_s64(int64_t x, uint32_t n) {
#ifdef PYTORCH_QNNP_USE_SHIFT_BASE_UB_WORKAROUND
  if (x < 0) {
    return ~(~x >> n);
  }
  return x >> n;
#else
  return x >> n;
#endif
}
/*
 * Reference "precise" requantization of one int32 value to uint8.
 * scale must be in [2^-32, 1); it is decomposed into a 24-bit fixed-point
 * multiplier and a right shift in [24, 56). Rounding is to nearest with
 * midpoints rounded away from zero. Result is clamped to [qmin, qmax] and
 * biased by zero_point.
 */
inline static uint8_t pytorch_scalar_requantize_precise(
    int32_t value,
    float scale,
    uint8_t zero_point,
    uint8_t qmin,
    uint8_t qmax) {
  assert(scale < 1.0f);
  assert(scale >= 0x1.0p-32f);
  const uint32_t scale_bits = fp32_to_bits(scale);
  /* Mantissa with the implicit leading 1 restored: a 24-bit multiplier. */
  const uint32_t multiplier =
      (scale_bits & UINT32_C(0x007FFFFF)) | UINT32_C(0x00800000);
  const uint32_t shift = 127 + 23 - (scale_bits >> 23);
  assert(shift >= 24);
  assert(shift < 56);
  /*
   * Compute absolute value of input as unsigned 32-bit int.
   * All further computations will work with unsigned values to avoid undefined
   * behaviour on signed operations.
   */
  const uint32_t abs_value = (value >= 0) ? (uint32_t)value : -(uint32_t)value;
  /* Compute full 64-bit product of 32-bit factors */
  const uint64_t product = (uint64_t)abs_value * (uint64_t)multiplier;
  /*
   * Shift the full 64-bit product right with rounding.
   * Rounding is performed towards closest integer, with midpoints rounded up
   * (same as away from zero).
   */
  const uint64_t rounding = UINT64_C(1) << (shift - 1);
  const uint32_t abs_scaled_value = (uint32_t)((product + rounding) >> shift);
  /*
   * Copy the sign of input to scaled absolute input value.
   */
  const int32_t scaled_value =
      (int32_t)(value >= 0 ? abs_scaled_value : -abs_scaled_value);
  /* Clamp scaled value with zero point between smin and smax */
  int32_t clamped_value = scaled_value;
  const int32_t smin = (int32_t)(uint32_t)qmin - (int32_t)(uint32_t)zero_point;
  if (clamped_value < smin) {
    clamped_value = smin;
  }
  const int32_t smax = (int32_t)(uint32_t)qmax - (int32_t)(uint32_t)zero_point;
  if (clamped_value > smax) {
    clamped_value = smax;
  }
  /* Add zero point to clamped value */
  const int32_t biased_value = clamped_value + (int32_t)(uint32_t)zero_point;
  return biased_value;
}
| 3,517
| 28.316667
| 79
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/qnnpack/sconv.h
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <stddef.h>
#include <stdint.h>
#include <qnnpack/common.h>
#include <qnnpack/params.h>
#ifdef __cplusplus
extern "C" {
#endif
#define DECLARE_PYTORCH_SCONV_UKERNEL_FUNCTION(fn_name) \
PYTORCH_QNNP_INTERNAL void fn_name( \
size_t mr, \
size_t nr, \
size_t kc, \
size_t ks, \
const float** a, \
const float* w, \
float* c, \
size_t c_stride, \
const struct pytorch_qnnp_fp32_clamping_params* params);
DECLARE_PYTORCH_SCONV_UKERNEL_FUNCTION(pytorch_sconv_ukernel_6x8__psimd)
#ifdef __cplusplus
} /* extern "C" */
#endif
| 1,062
| 26.973684
| 72
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/qnnpack/sdwconv.h
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <stddef.h>
#include <stdint.h>
#include <qnnpack/common.h>
#include <qnnpack/params.h>
#ifdef __cplusplus
extern "C" {
#endif
#define DECLARE_PYTORCH_SUPDWCONV_UKERNEL_FUNCTION(fn_name) \
PYTORCH_QNNP_INTERNAL void fn_name( \
size_t channels, \
size_t output_width, \
const float** input, \
const float* weights, \
float* output, \
size_t input_stride, \
size_t output_increment, \
const struct pytorch_qnnp_fp32_clamping_params* clamping_params);
DECLARE_PYTORCH_SUPDWCONV_UKERNEL_FUNCTION(pytorch_sdwconv_ukernel_up4x9__psimd)
#define DECLARE_PYTORCH_SMPDWCONV_UKERNEL_FUNCTION(fn_name) \
PYTORCH_QNNP_INTERNAL void fn_name( \
size_t channels, \
size_t output_width, \
const uint8_t** input, \
const void* weights, \
int32_t* buffer, \
uint8_t* output, \
size_t input_stride, \
size_t output_increment, \
const struct pytorch_qnnp_fp32_clamping_params* clamping_params);
#ifdef __cplusplus
} /* extern "C" */
#endif
| 1,686
| 33.428571
| 80
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/qnnpack/sgemm.h
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <stddef.h>
#include <qnnpack/common.h>
#include <qnnpack/params.h>
#ifdef __cplusplus
extern "C" {
#endif
#define DECLARE_PYTORCH_SGEMM_UKERNEL_FUNCTION(fn_name) \
PYTORCH_QNNP_INTERNAL void fn_name( \
size_t mr, \
size_t nr, \
size_t k, \
const float* a, \
size_t a_stride, \
const float* w, \
float* c, \
size_t c_stride, \
const struct pytorch_qnnp_fp32_clamping_params* clamping_params);
DECLARE_PYTORCH_SGEMM_UKERNEL_FUNCTION(pytorch_sgemm_ukernel_5x8__neon)
DECLARE_PYTORCH_SGEMM_UKERNEL_FUNCTION(pytorch_sgemm_ukernel_6x8__neon)
DECLARE_PYTORCH_SGEMM_UKERNEL_FUNCTION(pytorch_sgemm_ukernel_6x8__psimd)
#ifdef __cplusplus
} /* extern "C" */
#endif
| 1,195
| 29.666667
| 72
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/qnnpack/u8clamp.h
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <stddef.h>
#include <stdint.h>
#include <qnnpack/common.h>
#include <qnnpack/params.h>
#ifdef __cplusplus
extern "C" {
#endif
#define DECLARE_PYTORCH_U8CLAMP_UKERNEL_FUNCTION(fn_name) \
PYTORCH_QNNP_INTERNAL void fn_name( \
size_t n, \
const uint8_t* x, \
uint8_t* y, \
const union pytorch_qnnp_u8_clamping_params* params);
DECLARE_PYTORCH_U8CLAMP_UKERNEL_FUNCTION(pytorch_u8clamp_ukernel__neon)
DECLARE_PYTORCH_U8CLAMP_UKERNEL_FUNCTION(pytorch_u8clamp_ukernel__sse2)
#ifdef __cplusplus
} /* extern "C" */
#endif
| 890
| 25.205882
| 72
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/qnnpack/u8lut32norm.h
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <stddef.h>
#include <stdint.h>
#include <qnnpack/common.h>
#include <qnnpack/params.h>
#ifdef __cplusplus
extern "C" {
#endif
#define DECLARE_PYTORCH_X8LUT32NORM_UKERNEL_FUNCTION(fn_name) \
PYTORCH_QNNP_INTERNAL void fn_name( \
size_t n, const uint8_t* x, const uint32_t* t, uint8_t* y);
DECLARE_PYTORCH_X8LUT32NORM_UKERNEL_FUNCTION(pytorch_u8lut32norm_ukernel__scalar)
#ifdef __cplusplus
} /* extern "C" */
#endif
| 686
| 21.9
| 81
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/qnnpack/u8maxpool.h
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <stddef.h>
#include <stdint.h>
#include <qnnpack/common.h>
#include <qnnpack/params.h>
#ifdef __cplusplus
extern "C" {
#endif
#define DECLARE_PYTORCH_U8MAXPOOL_UKERNEL_FUNCTION(fn_name) \
PYTORCH_QNNP_INTERNAL void fn_name( \
size_t n, \
size_t ks, \
size_t kc, \
const uint8_t** x, \
uint8_t* y, \
size_t x_increment, \
size_t y_increment, \
const union pytorch_qnnp_u8_clamping_params* params);
DECLARE_PYTORCH_U8MAXPOOL_UKERNEL_FUNCTION(pytorch_u8maxpool_ukernel_16x9p8q__neon)
DECLARE_PYTORCH_U8MAXPOOL_UKERNEL_FUNCTION(pytorch_u8maxpool_ukernel_16x9p8q__sse2)
DECLARE_PYTORCH_U8MAXPOOL_UKERNEL_FUNCTION(pytorch_u8maxpool_ukernel_sub16__neon)
DECLARE_PYTORCH_U8MAXPOOL_UKERNEL_FUNCTION(pytorch_u8maxpool_ukernel_sub16__sse2)
#ifdef __cplusplus
} /* extern "C" */
#endif
| 1,304
| 31.625
| 83
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/qnnpack/u8rmax.h
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <stddef.h>
#include <stdint.h>
#include <qnnpack/common.h>
#include <qnnpack/params.h>
#ifdef __cplusplus
extern "C" {
#endif
#define DECLARE_PYTORCH_U8RMAX_UKERNEL_FUNCTION(fn_name) \
PYTORCH_QNNP_INTERNAL uint8_t fn_name(size_t n, const uint8_t* x);
DECLARE_PYTORCH_U8RMAX_UKERNEL_FUNCTION(pytorch_u8rmax_ukernel__neon)
DECLARE_PYTORCH_U8RMAX_UKERNEL_FUNCTION(pytorch_u8rmax_ukernel__sse2)
#ifdef __cplusplus
} /* extern "C" */
#endif
| 686
| 21.9
| 72
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/qnnpack/x8lut.h
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <stddef.h>
#include <stdint.h>
#include <qnnpack/common.h>
#include <qnnpack/params.h>
#ifdef __cplusplus
extern "C" {
#endif
#define DECLARE_PYTORCH_X8LUT_UKERNEL_FUNCTION(fn_name) \
PYTORCH_QNNP_INTERNAL void fn_name( \
size_t n, const uint8_t* x, const uint8_t* t, uint8_t* y);
DECLARE_PYTORCH_X8LUT_UKERNEL_FUNCTION(pytorch_x8lut_ukernel__scalar)
#ifdef __cplusplus
} /* extern "C" */
#endif
| 661
| 21.066667
| 72
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/qnnpack/x8zip.h
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <stddef.h>
#include <stdint.h>
#include <qnnpack/common.h>
#include <qnnpack/params.h>
#ifdef __cplusplus
extern "C" {
#endif
#define DECLARE_PYTORCH_XZIPC_UKERNEL_FUNCTION(fn_name) \
PYTORCH_QNNP_INTERNAL void fn_name(size_t n, const void* x, void* y);
DECLARE_PYTORCH_XZIPC_UKERNEL_FUNCTION(pytorch_qnnp_x8zip_x2__neon)
DECLARE_PYTORCH_XZIPC_UKERNEL_FUNCTION(pytorch_qnnp_x8zip_x2__sse2)
DECLARE_PYTORCH_XZIPC_UKERNEL_FUNCTION(pytorch_qnnp_x8zip_x3__neon)
DECLARE_PYTORCH_XZIPC_UKERNEL_FUNCTION(pytorch_qnnp_x8zip_x3__sse2)
DECLARE_PYTORCH_XZIPC_UKERNEL_FUNCTION(pytorch_qnnp_x8zip_x4__neon)
DECLARE_PYTORCH_XZIPC_UKERNEL_FUNCTION(pytorch_qnnp_x8zip_x4__sse2)
#define DECLARE_PYTORCH_XZIPV_UKERNEL_FUNCTION(fn_name) \
PYTORCH_QNNP_INTERNAL void fn_name( \
size_t n, size_t m, const void* x, void* y);
DECLARE_PYTORCH_XZIPV_UKERNEL_FUNCTION(pytorch_qnnp_x8zip_xm__neon)
DECLARE_PYTORCH_XZIPV_UKERNEL_FUNCTION(pytorch_qnnp_x8zip_xm__sse2)
#ifdef __cplusplus
} /* extern "C" */
#endif
| 1,253
| 29.585366
| 72
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/requantization/fp32-neon.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <stdint.h>
#include <arm_neon.h>
#include <qnnpack/requantization-stubs.h>
void pytorch_qnnp_requantize_fp32__neon(
size_t n,
const int32_t* input,
float scale,
uint8_t zero_point,
uint8_t qmin,
uint8_t qmax,
uint8_t* output) {
assert(n % 16 == 0);
assert(scale < 1.0f);
assert(scale >= 0x1.0p-32f);
const float32x4_t vscale = vdupq_n_f32(scale);
#ifdef __aarch64__
const int16x8_t vzero_point = vdupq_n_s16((int16_t)(uint16_t)zero_point);
const uint8x16_t vqmin = vdupq_n_u8(qmin);
const uint8x16_t vqmax = vdupq_n_u8(qmax);
#else
const float32x4_t vfmin = vdupq_n_f32(
(float)((int32_t)(uint32_t)qmin - (int32_t)(uint32_t)zero_point));
const float32x4_t vfmax = vdupq_n_f32(
(float)((int32_t)(uint32_t)qmax - (int32_t)(uint32_t)zero_point));
const float32x4_t vfmagic = vdupq_n_f32(12582912.0f);
const int32x4_t vimagic =
vdupq_n_s32(INT32_C(0x4B400000) - (int32_t)(uint32_t)zero_point);
#endif
for (; n != 0; n -= 16) {
const int32x4_t x = vld1q_s32(input);
const int32x4_t y = vld1q_s32(input + 4);
const int32x4_t z = vld1q_s32(input + 8);
const int32x4_t w = vld1q_s32(input + 12);
input += 16;
/*
* Convert int32_t input to FP32 and multiply by FP32 scale.
* Both operations involve statistically unbiased roundings:
* - Large int32_t values can't be exactly represented as FP32. The
* conversion instruction in ARM NEON would round it to nearest FP32 value
* with ties to even.
* - Product of two FP32 values is generally not exactly representation as
* an FP32 value, and will be rounded to nearest FP32 value with ties to
* even.
*/
const float32x4_t x_scaled = vmulq_f32(vcvtq_f32_s32(x), vscale);
const float32x4_t y_scaled = vmulq_f32(vcvtq_f32_s32(y), vscale);
const float32x4_t z_scaled = vmulq_f32(vcvtq_f32_s32(z), vscale);
const float32x4_t w_scaled = vmulq_f32(vcvtq_f32_s32(w), vscale);
#ifdef __aarch64__
/*
* Leverage "Floating-point Convert to Signed integer, rounding to nearest
* with ties to even" instruction. This is an ARMv8 instruction (always
* available in AArch64), which saturates result on overflow. We don't need
* to specifically consider saturated results, they will be clamped at the
* last stage.
*/
const int32x4_t x_rounded = vcvtnq_s32_f32(x_scaled);
const int32x4_t y_rounded = vcvtnq_s32_f32(y_scaled);
const int32x4_t z_rounded = vcvtnq_s32_f32(z_scaled);
const int32x4_t w_rounded = vcvtnq_s32_f32(w_scaled);
/*
* Standard final sequence on ARM NEON:
* - Pack to int16_t and saturate
* - Add zero point
* - Pack to uint8_t and saturate
* - Clamp between qmin and qmax
*/
const int16x8_t xy_packed = vqaddq_s16(
vqmovn_high_s32(vqmovn_s32(x_rounded), y_rounded), vzero_point);
const int16x8_t zw_packed = vqaddq_s16(
vqmovn_high_s32(vqmovn_s32(z_rounded), w_rounded), vzero_point);
const uint8x16_t xyzw_packed =
vqmovun_high_s16(vqmovun_s16(xy_packed), zw_packed);
const uint8x16_t xyzw_clamped =
vmaxq_u8(vminq_u8(xyzw_packed, vqmax), vqmin);
vst1q_u8(output, xyzw_clamped);
output += 16;
#else
/*
* ARMv7 NEON offers only a floating-point to integer conversion instruction
* with rounding towards zero. In lieu of conversion instruction with
* rounding-to-nearest-even, we use a magic trick of adding a large number
* (1.5 * 2**23) to scaled value to cause rounding to integer, and then
* substracing this magic number as integer. This trick works only in a
* limited range (absolute value of input must be less than 2**22), so
* generally we have to clamp input to this range before using the magic.
* However, clamping to any smaller range works just as well, and thus we
* clamp to [qmin - zero point, qmax - zero point] range so that after we
* add zero point to the result, it gets into target [qmin, qmax] range.
*/
const float32x4_t x_clamped = vminq_f32(vmaxq_f32(x_scaled, vfmin), vfmax);
const float32x4_t y_clamped = vminq_f32(vmaxq_f32(y_scaled, vfmin), vfmax);
const float32x4_t z_clamped = vminq_f32(vmaxq_f32(z_scaled, vfmin), vfmax);
const float32x4_t w_clamped = vminq_f32(vmaxq_f32(w_scaled, vfmin), vfmax);
/*
* Conversion to integer using the "magic trick". Rounding is performed in
* the output of addition operation, and result is rounded to nearest even
* integer with ties to even.
*/
const int32x4_t x_biased = vsubq_s32(
vreinterpretq_s32_f32(vaddq_f32(x_clamped, vfmagic)), vimagic);
const int32x4_t y_biased = vsubq_s32(
vreinterpretq_s32_f32(vaddq_f32(y_clamped, vfmagic)), vimagic);
const int32x4_t z_biased = vsubq_s32(
vreinterpretq_s32_f32(vaddq_f32(z_clamped, vfmagic)), vimagic);
const int32x4_t w_biased = vsubq_s32(
vreinterpretq_s32_f32(vaddq_f32(w_clamped, vfmagic)), vimagic);
/*
* Select low 8 bits of each 32-bit integer in the vectors for the output.
* Since result is already clamped to [qmin, qmax] subrange of [0, 255],
* saturation is not needed.
*/
const int16x8_t xy_packed =
vcombine_s16(vmovn_s32(x_biased), vmovn_s32(y_biased));
const int16x8_t zw_packed =
vcombine_s16(vmovn_s32(z_biased), vmovn_s32(w_biased));
const uint8x16_t xyzw_packed = vreinterpretq_u8_s8(
vcombine_s8(vmovn_s16(xy_packed), vmovn_s16(zw_packed)));
/*
* AArch32 version:
* 4x VCVT.F32.S32 Qd, Qm
* 4x VMUL.F32 Qd, Qm, Qn
* 4x VMIN.F32 Qd, Qm, Qn
* 4x VMAX.F32 Qd, Qm, Qn
* 4x VADD.F32 Qd, Qm, Qn
* 4x VSUB.S32 Qd, Qm, Qn
* 4x VMOVN.I32 Dd, Qm
* 2x VMOVN.I16 Dd, Qm
* ---------------------
* 30 instructions total
*
* AArch64 version:
* 4x SCVTF Vd.4S, Vn.4S
* 4x FMUL Vd.4S, Vn.4S, Vm.4S
* 4x FCVTNS Vd.4S, Vn.4S
* 2x SQXTN Vd.4H, Vn.4S
* 2x SQXTN2 Vd.8H, Vn.4S
* 2x ADD Vd.8H, Vn.8H, Vm.8H
* 1x SQXTUN Vd.8B, Vn.8H
* 1x SQXTUN2 Vd.16B, Vn.8H
* 1x UMIN Vd.16B, Vn.16B, Vm.16B
* 1x UMAX Vd.16B, Vn.16B, Vm.16B
* ---------------------
* 22 instructions total
*/
vst1q_u8(output, xyzw_packed);
output += 16;
#endif
}
}
| 6,633
| 37.569767
| 80
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/requantization/fp32-psimd.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <stdint.h>
#include <psimd.h>
#include <qnnpack/requantization-stubs.h>
void pytorch_qnnp_requantize_fp32__psimd(
size_t n,
const int32_t* input,
float scale,
uint8_t zero_point,
uint8_t qmin,
uint8_t qmax,
uint8_t* output) {
assert(n % 16 == 0);
assert(scale < 1.0f);
assert(scale >= 0x1.0p-32f);
const psimd_f32 vscale = psimd_splat_f32(scale);
const psimd_f32 vfmin = psimd_splat_f32(
(float)((int32_t)(uint32_t)qmin - (int32_t)(uint32_t)zero_point));
const psimd_f32 vfmax = psimd_splat_f32(
(float)((int32_t)(uint32_t)qmax - (int32_t)(uint32_t)zero_point));
const psimd_f32 vfmagic = psimd_splat_f32(12582912.0f);
const psimd_s32 vimagic =
psimd_splat_s32(INT32_C(0x4B400000) - (int32_t)(uint32_t)zero_point);
for (; n != 0; n -= 16) {
const psimd_s32 x = psimd_load_s32(input);
const psimd_s32 y = psimd_load_s32(input + 4);
const psimd_s32 z = psimd_load_s32(input + 8);
const psimd_s32 w = psimd_load_s32(input + 12);
input += 16;
/*
* Convert int32_t input to FP32 and multiply by FP32 scale.
* Both operations involve roundings:
* - Large int32_t values can't be exactly represented as FP32. We expect
* that conversion instruction would round it to nearest FP32 value with
* ties to even, but Clang documentation for __builtin_convertvector does
* not guarantee that.
* - Product of two FP32 values is generally not exactly representation as
* an FP32 value, and will be rounded to nearest FP32 value with ties to
* even.
*/
const psimd_f32 x_scaled = psimd_cvt_s32_f32(x) * vscale;
const psimd_f32 y_scaled = psimd_cvt_s32_f32(y) * vscale;
const psimd_f32 z_scaled = psimd_cvt_s32_f32(z) * vscale;
const psimd_f32 w_scaled = psimd_cvt_s32_f32(w) * vscale;
/*
* Clang/gcc vector extension does not provide an intrinsics for a
* floating-point to integer conversion operation with
* rounding-to-nearest-even. In lieu of such intrinsic, we use a magic trick
* of adding a large number (1.5 * 2**23) to scaled value to cause rounding
* to integer, and then substracing this magic number as integer. This trick
* works only in a limited range (absolute value of input must be less than
* 2**22), so generally we have to clamp input to this range before using
* the magic. However, clamping to any smaller range works just as well, and
* thus we clamp to [qmin - zero point, qmax - zero point] range so that
* after we add zero point to the result, it gets into target [qmin, qmax]
* range.
*/
const psimd_f32 x_clamped =
psimd_min_f32(psimd_max_f32(x_scaled, vfmin), vfmax);
const psimd_f32 y_clamped =
psimd_min_f32(psimd_max_f32(y_scaled, vfmin), vfmax);
const psimd_f32 z_clamped =
psimd_min_f32(psimd_max_f32(z_scaled, vfmin), vfmax);
const psimd_f32 w_clamped =
psimd_min_f32(psimd_max_f32(w_scaled, vfmin), vfmax);
/*
* Conversion to integer using the "magic trick". Rounding is performed in
* the output of addition operation, and result is rounded to nearest even
* integer with ties to even.
*/
const psimd_s32 x_biased = (psimd_s32)(x_clamped + vfmagic) - vimagic;
const psimd_s32 y_biased = (psimd_s32)(y_clamped + vfmagic) - vimagic;
const psimd_s32 z_biased = (psimd_s32)(z_clamped + vfmagic) - vimagic;
const psimd_s32 w_biased = (psimd_s32)(w_clamped + vfmagic) - vimagic;
/*
* Select low 8 bits of each 32-bit integer in the vectors for the output.
* Since result is already clamped to [qmin, qmax] subrange of [0, 255],
* saturation is not needed.
*/
const psimd_u16 xy_packed =
psimd_concat_even_u16((psimd_u16)x_biased, (psimd_u16)y_biased);
const psimd_u16 zw_packed =
psimd_concat_even_u16((psimd_u16)z_biased, (psimd_u16)w_biased);
const psimd_u8 xyzw_packed =
psimd_concat_even_u8((psimd_u8)xy_packed, (psimd_u8)zw_packed);
psimd_store_u8(output, xyzw_packed);
output += 16;
}
}
| 4,345
| 39.240741
| 80
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/requantization/fp32-scalar.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <math.h>
#include <stdint.h>
#include <fp16/bitcasts.h>
#include <qnnpack/requantization-stubs.h>
void pytorch_qnnp_requantize_fp32__scalar_lrintf(
size_t n,
const int32_t* input,
float scale,
uint8_t zero_point,
uint8_t qmin,
uint8_t qmax,
uint8_t* output) {
assert(n % 4 == 0);
assert(scale < 1.0f);
assert(scale >= 0x1.0p-32f);
const long lmin =
(long)((int32_t)(uint32_t)qmin - (int32_t)(uint32_t)zero_point);
const long lmax =
(long)((int32_t)(uint32_t)qmax - (int32_t)(uint32_t)zero_point);
for (; n != 0; n -= 4) {
const int32_t x = input[0];
const int32_t y = input[1];
const int32_t z = input[2];
const int32_t w = input[3];
input += 4;
const float x_scaled = (float)x * scale;
const float y_scaled = (float)y * scale;
const float z_scaled = (float)z * scale;
const float w_scaled = (float)w * scale;
const long x_rounded = lrintf(x_scaled);
const long y_rounded = lrintf(y_scaled);
const long z_rounded = lrintf(z_scaled);
const long w_rounded = lrintf(w_scaled);
const int32_t x_clamped = (int32_t)(
x_rounded < lmin ? lmin : x_rounded > lmax ? lmax : x_rounded);
const int32_t y_clamped = (int32_t)(
y_rounded < lmin ? lmin : y_rounded > lmax ? lmax : y_rounded);
const int32_t z_clamped = (int32_t)(
z_rounded < lmin ? lmin : z_rounded > lmax ? lmax : z_rounded);
const int32_t w_clamped = (int32_t)(
w_rounded < lmin ? lmin : w_rounded > lmax ? lmax : w_rounded);
const int32_t x_biased = x_clamped + (int32_t)(uint32_t)zero_point;
const int32_t y_biased = y_clamped + (int32_t)(uint32_t)zero_point;
const int32_t z_biased = z_clamped + (int32_t)(uint32_t)zero_point;
const int32_t w_biased = w_clamped + (int32_t)(uint32_t)zero_point;
output[0] = (uint8_t)x_biased;
output[1] = (uint8_t)y_biased;
output[2] = (uint8_t)z_biased;
output[3] = (uint8_t)w_biased;
output += 4;
}
}
void pytorch_qnnp_requantize_fp32__scalar_magic(
size_t n,
const int32_t* input,
float scale,
uint8_t zero_point,
uint8_t qmin,
uint8_t qmax,
uint8_t* output) {
assert(n % 4 == 0);
assert(scale < 1.0f);
assert(scale >= 0x1.0p-32f);
const float fmin =
(float)((int32_t)(uint32_t)qmin - (int32_t)(uint32_t)zero_point);
const float fmax =
(float)((int32_t)(uint32_t)qmax - (int32_t)(uint32_t)zero_point);
const float fmagic = 12582912.0f;
const int32_t imagic = INT32_C(0x4B400000) - (int32_t)(uint32_t)zero_point;
for (; n != 0; n -= 4) {
const int32_t x = input[0];
const int32_t y = input[1];
const int32_t z = input[2];
const int32_t w = input[3];
input += 4;
const float x_scaled = (float)x * scale;
const float y_scaled = (float)y * scale;
const float z_scaled = (float)z * scale;
const float w_scaled = (float)w * scale;
const float x_clamped =
x_scaled < fmin ? fmin : x_scaled > fmax ? fmax : x_scaled;
const float y_clamped =
y_scaled < fmin ? fmin : y_scaled > fmax ? fmax : y_scaled;
const float z_clamped =
z_scaled < fmin ? fmin : z_scaled > fmax ? fmax : z_scaled;
const float w_clamped =
w_scaled < fmin ? fmin : w_scaled > fmax ? fmax : w_scaled;
const int32_t x_biased = (int32_t)fp32_to_bits(x_clamped + fmagic) - imagic;
const int32_t y_biased = (int32_t)fp32_to_bits(y_clamped + fmagic) - imagic;
const int32_t z_biased = (int32_t)fp32_to_bits(z_clamped + fmagic) - imagic;
const int32_t w_biased = (int32_t)fp32_to_bits(w_clamped + fmagic) - imagic;
output[0] = (uint8_t)x_biased;
output[1] = (uint8_t)y_biased;
output[2] = (uint8_t)z_biased;
output[3] = (uint8_t)w_biased;
output += 4;
}
}
| 4,031
| 32.04918
| 80
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/requantization/fp32-sse2.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <stdint.h>
#include <emmintrin.h>
#include <qnnpack/requantization-stubs.h>
void pytorch_qnnp_requantize_fp32__sse2(
size_t n,
const int32_t* input,
float scale,
uint8_t zero_point,
uint8_t qmin,
uint8_t qmax,
uint8_t* output) {
assert(n % 16 == 0);
assert(scale < 1.0f);
assert(scale >= 0x1.0p-32f);
const __m128 vscale = _mm_set1_ps(scale);
const __m128i vzero_point = _mm_set1_epi16((short)(uint16_t)zero_point);
const __m128i vqmin = _mm_set1_epi8((char)qmin);
const __m128i vqmax = _mm_set1_epi8((char)qmax);
for (; n != 0; n -= 16) {
const __m128i x = _mm_loadu_si128((const __m128i*)input);
const __m128i y = _mm_loadu_si128((const __m128i*)(input + 4));
const __m128i z = _mm_loadu_si128((const __m128i*)(input + 8));
const __m128i w = _mm_loadu_si128((const __m128i*)(input + 12));
input += 16;
/*
* Convert int32_t input to FP32 and multiply by FP32 scale.
* Both operations involve statistically unbiased roundings (with default
* MXCSR rounding mode):
* - Large int32_t values can't be exactly represented as FP32. CVTDQ2PS
* instruction on x86 would round it according to nearest FP32 value with
* ties to even (assuming default MXCSR rounding mode).
* - Product of two FP32 values is generally not exactly representation as
* an FP32 value, and will be rounded to nearest FP32 value with ties to
* even with default MXCSR rounding mode.
*/
const __m128 x_scaled = _mm_mul_ps(_mm_cvtepi32_ps(x), vscale);
const __m128 y_scaled = _mm_mul_ps(_mm_cvtepi32_ps(y), vscale);
const __m128 z_scaled = _mm_mul_ps(_mm_cvtepi32_ps(z), vscale);
const __m128 w_scaled = _mm_mul_ps(_mm_cvtepi32_ps(w), vscale);
/*
* Convert scaled FP32 result to int32_t using CVTPS2DQ instruction from x86
* SSE2. CVTPS2DQ instruction rounds result according to nearest FP32 value
* with ties to even (assuming default MXCSR rounding mode). However, when
* conversion overflows, it produces INT32_MIN as a result. For large
* positive inputs the result of conversion can become negative, which
* affects the final requantization result. Note that on x86 SSE2 we have
* e.g. int32_t(float(INT32_MAX)) == INT32_MIN! This happens because
* float(INT32_MAX) rounds to 2**31, which overflows int32_t when it is
* converted back to integer.
*
* Thankfully, we can prove that overflow never happens in this
* requantization scheme. The largest positive input is INT32_MAX (2**31 -
* 1), which turns into 2**31 when converted to float. The largest scale
* value is 0x1.FFFFFEp-1. When multiplied together, the result is
* 2147483520 (compare to INT32_MAX = 2147483647), which fits into int32_t
* without overflow.
*/
const __m128i x_rounded = _mm_cvtps_epi32(x_scaled);
const __m128i y_rounded = _mm_cvtps_epi32(y_scaled);
const __m128i z_rounded = _mm_cvtps_epi32(z_scaled);
const __m128i w_rounded = _mm_cvtps_epi32(w_scaled);
/*
* Standard final sequence on x86 SSE2:
* - Pack to int16_t and saturate
* - Add zero point
* - Pack to uint8_t and saturate
* - Clamp between qmin and qmax
*/
const __m128i xy_packed =
_mm_adds_epi16(_mm_packs_epi32(x_rounded, y_rounded), vzero_point);
const __m128i zw_packed =
_mm_adds_epi16(_mm_packs_epi32(z_rounded, w_rounded), vzero_point);
const __m128i xyzw_packed = _mm_packus_epi16(xy_packed, zw_packed);
const __m128i xyzw_clamped =
_mm_max_epu8(_mm_min_epu8(xyzw_packed, vqmax), vqmin);
/*
* 4x CVTDQ2PS
* 4x MULPS
* 4x CVTPS2DQ
* 2x PACKSSDW
* 1x PACKUSWB
* 2x PADDW
* 1x PMAXUB
* 1x PMINUB
* ---------------------
* 19 instructions total
*/
_mm_storeu_si128((__m128i*)output, xyzw_clamped);
output += 16;
}
}
| 4,169
| 36.909091
| 80
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/requantization/gemmlowp-neon.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <stdint.h>
#include <arm_neon.h>
#include <fp16/bitcasts.h>
#include <qnnpack/requantization-stubs.h>
/*
* The requantization implementation below is adapted from Google's gemmlowp
* library. It is only used in QNNPACK unit tests and comparative benchmarks,
* but not the library itself.
*/
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
void pytorch_qnnp_requantize_gemmlowp__neon(
size_t n,
const int32_t* input,
float scale,
uint8_t zero_point,
uint8_t qmin,
uint8_t qmax,
uint8_t* output) {
assert(n % 16 == 0);
assert(scale < 1.0f);
assert(scale >= 0x1.0p-32f);
const uint32_t scale_bits = fp32_to_bits(scale);
/* Compute requantization parameters */
const uint32_t multiplier =
((scale_bits & UINT32_C(0x007FFFFF)) | UINT32_C(0x00800000)) << 7;
const int32_t exponent = (fp32_to_bits(scale) >> 23) - 127 - 23 - 7;
const int32_t shift =
-(32 /* using high 32 bits in VQRDMUL */ - 1 /* doubling in VQRDMUL */ +
exponent);
const int32x4_t vmultiplier = vdupq_n_s32(multiplier);
const int16x8_t vzero_point = vdupq_n_s16((int16_t)(uint16_t)zero_point);
const int32x4_t vshift = vdupq_n_s32(-shift);
const uint8x16_t vqmin = vdupq_n_u8(qmin);
const uint8x16_t vqmax = vdupq_n_u8(qmax);
for (; n != 0; n -= 16) {
const int32x4_t x = vld1q_s32(input);
const int32x4_t y = vld1q_s32(input + 4);
const int32x4_t z = vld1q_s32(input + 8);
const int32x4_t w = vld1q_s32(input + 12);
input += 16;
const int32x4_t x_product = vqrdmulhq_s32(x, vmultiplier);
const int32x4_t y_product = vqrdmulhq_s32(y, vmultiplier);
const int32x4_t z_product = vqrdmulhq_s32(z, vmultiplier);
const int32x4_t w_product = vqrdmulhq_s32(w, vmultiplier);
const int32x4_t x_product_fixup = vshrq_n_s32(vandq_s32(x, vshift), 31);
const int32x4_t y_product_fixup = vshrq_n_s32(vandq_s32(y, vshift), 31);
const int32x4_t z_product_fixup = vshrq_n_s32(vandq_s32(z, vshift), 31);
const int32x4_t w_product_fixup = vshrq_n_s32(vandq_s32(w, vshift), 31);
const int32x4_t x_adjusted_product = vqaddq_s32(x_product, x_product_fixup);
const int32x4_t y_adjusted_product = vqaddq_s32(y_product, y_product_fixup);
const int32x4_t z_adjusted_product = vqaddq_s32(z_product, z_product_fixup);
const int32x4_t w_adjusted_product = vqaddq_s32(w_product, w_product_fixup);
const int32x4_t x_scaled = vrshlq_s32(x_adjusted_product, vshift);
const int32x4_t y_scaled = vrshlq_s32(y_adjusted_product, vshift);
const int32x4_t z_scaled = vrshlq_s32(z_adjusted_product, vshift);
const int32x4_t w_scaled = vrshlq_s32(w_adjusted_product, vshift);
#ifdef __aarch64__
const int16x8_t xy_packed = vqaddq_s16(
vqmovn_high_s32(vqmovn_s32(x_scaled), y_scaled), vzero_point);
const int16x8_t zw_packed = vqaddq_s16(
vqmovn_high_s32(vqmovn_s32(z_scaled), w_scaled), vzero_point);
const uint8x16_t xyzw_packed =
vqmovun_high_s16(vqmovun_s16(xy_packed), zw_packed);
#else
const int16x8_t xy_packed = vqaddq_s16(
vcombine_s16(vqmovn_s32(x_scaled), vqmovn_s32(y_scaled)), vzero_point);
const int16x8_t zw_packed = vqaddq_s16(
vcombine_s16(vqmovn_s32(z_scaled), vqmovn_s32(w_scaled)), vzero_point);
const uint8x16_t xyzw_packed =
vcombine_u8(vqmovun_s16(xy_packed), vqmovun_s16(zw_packed));
#endif
const uint8x16_t xyzw_clamped =
vmaxq_u8(vminq_u8(xyzw_packed, vqmax), vqmin);
vst1q_u8(output, xyzw_clamped);
output += 16;
}
}
| 4,345
| 37.122807
| 80
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/requantization/gemmlowp-scalar.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <stdint.h>
#include <fp16/bitcasts.h>
#include <qnnpack/requantization-stubs.h>
#include <qnnpack/scalar-utils.h>
#include "gemmlowp-scalar.h"
void pytorch_qnnp_requantize_gemmlowp__scalar(
size_t n,
const int32_t* input,
float scale,
uint8_t zero_point,
uint8_t qmin,
uint8_t qmax,
uint8_t* output) {
assert(n % 4 == 0);
assert(scale < 1.0f);
assert(scale >= 0x1.0p-32f);
const uint32_t scale_bits = fp32_to_bits(scale);
/* Compute requantization parameters */
const uint32_t multiplier =
((scale_bits & UINT32_C(0x007FFFFF)) | UINT32_C(0x00800000)) << 7;
const int32_t exponent = (fp32_to_bits(scale) >> 23) - 127 - 23 - 7;
const int32_t shift =
-(32 /* using high 32 bits in VQRDMUL */ - 1 /* doubling in VQRDMUL */ +
exponent);
const int32_t smin = (int32_t)(uint32_t)qmin;
const int32_t smax = (int32_t)(uint32_t)qmax;
for (; n != 0; n -= 4) {
const int32_t x = input[0];
const int32_t y = input[1];
const int32_t z = input[2];
const int32_t w = input[3];
input += 4;
const int32_t x_product = gemmlowp_scalar_vqrdmulh_s32(x, multiplier);
const int32_t y_product = gemmlowp_scalar_vqrdmulh_s32(y, multiplier);
const int32_t z_product = gemmlowp_scalar_vqrdmulh_s32(z, multiplier);
const int32_t w_product = gemmlowp_scalar_vqrdmulh_s32(w, multiplier);
const int32_t x_scaled = gemmlowp_scalar_rdivbypo2_s32(x_product, shift);
const int32_t y_scaled = gemmlowp_scalar_rdivbypo2_s32(y_product, shift);
const int32_t z_scaled = gemmlowp_scalar_rdivbypo2_s32(z_product, shift);
const int32_t w_scaled = gemmlowp_scalar_rdivbypo2_s32(w_product, shift);
/* Add zero point to scaled value */
const int32_t x_biased = x_scaled + zero_point;
const int32_t y_biased = y_scaled + zero_point;
const int32_t z_biased = z_scaled + zero_point;
const int32_t w_biased = w_scaled + zero_point;
/* Clamp scaled value with zero point between smin and smax */
const int32_t x_clamped =
x_biased < smin ? smin : x_biased > smax ? smax : x_biased;
const int32_t y_clamped =
y_biased < smin ? smin : y_biased > smax ? smax : y_biased;
const int32_t z_clamped =
z_biased < smin ? smin : z_biased > smax ? smax : z_biased;
const int32_t w_clamped =
w_biased < smin ? smin : w_biased > smax ? smax : w_biased;
output[0] = (uint8_t)x_clamped;
output[1] = (uint8_t)y_clamped;
output[2] = (uint8_t)z_clamped;
output[3] = (uint8_t)w_clamped;
output += 4;
}
}
| 2,809
| 33.268293
| 78
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/requantization/gemmlowp-scalar.h
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <limits.h>
#include <stdint.h>
/*
* The code below is adapted from Google's gemmlowp library.
* It is only used in QNNPACK unit tests and comparative benchmarks,
* but not the library itself.
*/
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
inline static int32_t gemmlowp_scalar_vqrdmulh_s32(int32_t a, int32_t b) {
const bool overflow = a == b && a == INT32_MIN;
const int64_t ab_64 = (int64_t)a * (int64_t)b;
const int32_t nudge =
(a ^ b) >= 0 ? INT32_C(0x40000000) : -INT32_C(0x3FFFFFFF);
const int32_t ab_x2_high32 = (int32_t)((ab_64 + nudge) / INT64_C(0x80000000));
return overflow ? INT32_MAX : ab_x2_high32;
}
inline static int32_t gemmlowp_scalar_rdivbypo2_s32(int32_t x, int exponent) {
const int32_t mask = ((1 << exponent) - 1);
const int32_t remainder = x & mask;
const int32_t threshold = (mask >> 1) + (int32_t)(x < 0);
return asr_s32(x, exponent) + (int32_t)(remainder > threshold);
}
| 1,737
| 34.469388
| 80
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/requantization/gemmlowp-sse.h
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <limits.h>
#include <immintrin.h>
/*
* The code below is adapted from Google's gemmlowp library.
* It is only used in QNNPACK unit tests and comparative benchmarks,
* but not the library itself.
*/
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
static inline __m128i gemmlowp_sse_rdivbypo2_s32(__m128i x, int exponent) {
const __m128i mask =
_mm_set1_epi32((int32_t)((UINT64_C(1) << exponent) - UINT64_C(1)));
const __m128i remainder = _mm_and_si128(x, mask);
const __m128i threshold = _mm_sub_epi32(
_mm_srli_epi32(mask, 1), _mm_cmplt_epi32(x, _mm_setzero_si128()));
return _mm_sub_epi32(
_mm_sra_epi32(x, _mm_cvtsi32_si128(exponent)),
_mm_cmpgt_epi32(remainder, threshold));
}
static inline __m128i gemmlowp_sse_mul_s32(__m128i a, __m128i b) {
#ifdef __SSE4_1__
return _mm_mul_epi32(a, b);
#else
__m128i sign, zero, mul_us, a_neg, b_neg, mul_us_neg;
sign = _mm_xor_si128(a, b);
sign = _mm_srai_epi32(sign, 31); // promote sign bit to all fields, all fff if
// negative and all 0 if positive
sign = _mm_shuffle_epi32(
sign,
_MM_SHUFFLE(2, 2, 0, 0)); // promote sign bit to 3 and 1st data lanes
zero = _mm_setzero_si128();
#ifdef __SSSE3__
a_neg = _mm_abs_epi32(a); // negate a and b
b_neg = _mm_abs_epi32(b); // negate a and b
#else /* pre-SSSE3 */
const __m128i a_neg_mask = _mm_cmplt_epi32(a, zero);
a_neg = _mm_sub_epi32(_mm_xor_si128(a, a_neg_mask), a_neg_mask);
const __m128i b_neg_mask = _mm_cmplt_epi32(b, zero);
b_neg = _mm_sub_epi32(_mm_xor_si128(b, b_neg_mask), b_neg_mask);
#endif /* pre-SSSE3 */
mul_us = _mm_mul_epu32(a_neg, b_neg); // uses 0 and 2nd data lanes, (abs), the
// multiplication gives 64 bit result
mul_us_neg = _mm_sub_epi64(zero, mul_us);
mul_us_neg = _mm_and_si128(sign, mul_us_neg);
mul_us = _mm_andnot_si128(sign, mul_us);
return _mm_or_si128(mul_us, mul_us_neg);
#endif
}
static inline __m128i gemmlowp_sse_vqrdmulh_s32(__m128i a, __m128i b) {
// saturation only happen if a == b == INT32_MIN
const __m128i min = _mm_set1_epi32(INT32_MIN);
const __m128i saturation_mask =
_mm_and_si128(_mm_cmpeq_epi32(a, b), _mm_cmpeq_epi32(a, min));
// a = a0 | a1 | a2 | a3
// b = b0 | b1 | b2 | b3
const __m128i a0_a2 = a;
const __m128i a1_a3 = _mm_srli_si128(a, 4);
const __m128i b0_b2 = b;
const __m128i b1_b3 = _mm_srli_si128(b, 4);
const __m128i a0b0_a2b2 = gemmlowp_sse_mul_s32(a0_a2, b0_b2);
const __m128i a1b1_a3b3 = gemmlowp_sse_mul_s32(a1_a3, b1_b3);
// do the rounding and take into account that it will be doubled
const __m128i nudge = _mm_set1_epi64x(1 << 30);
const __m128i a0b0_a2b2_rounded = _mm_add_epi64(a0b0_a2b2, nudge);
const __m128i a1b1_a3b3_rounded = _mm_add_epi64(a1b1_a3b3, nudge);
// do the doubling
const __m128i a0b0_a2b2_rounded_2x = _mm_slli_epi64(a0b0_a2b2_rounded, 1);
const __m128i a1b1_a3b3_rounded_2x = _mm_slli_epi64(a1b1_a3b3_rounded, 1);
// get the high part of the products
#ifdef __SSE4_1__
const __m128i result = _mm_blend_epi16(
_mm_srli_epi64(a0b0_a2b2_rounded_2x, 32), a1b1_a3b3_rounded_2x, 0xCC);
#else
const __m128i result0213 = _mm_castps_si128(_mm_shuffle_ps(
_mm_castsi128_ps(a0b0_a2b2_rounded_2x),
_mm_castsi128_ps(a1b1_a3b3_rounded_2x),
_MM_SHUFFLE(3, 1, 3, 1)));
const __m128i result = _mm_shuffle_epi32(result0213, _MM_SHUFFLE(3, 1, 2, 0));
#endif
// saturate those which overflowed
#ifdef __SSE4_1__
const __m128i saturated_result =
_mm_blendv_epi8(result, min, saturation_mask);
#else
const __m128i saturated_result = _mm_or_si128(
_mm_and_si128(saturation_mask, min),
_mm_andnot_si128(saturation_mask, result));
#endif
return saturated_result;
}
| 4,591
| 36.032258
| 80
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/requantization/gemmlowp-sse2.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <stdint.h>
#include <emmintrin.h>
#include <fp16/bitcasts.h>
#include <qnnpack/requantization-stubs.h>
#include "gemmlowp-sse.h"
/*
 * Requantizes n signed 32-bit accumulators to uint8 using the
 * gemmlowp-compatible scheme, SSE2 variant.
 *
 * scale (must be in [2^-32, 1)) is decomposed into a fixed-point multiplier
 * and a right shift; each input is multiplied via a saturating rounding
 * doubling multiply-high (VQRDMULH-style, see gemmlowp-sse.h), rounding-
 * divided by 2^shift, offset by zero_point, and clamped to [qmin, qmax].
 *
 * n must be a multiple of 16; input and output hold n elements each.
 */
void pytorch_qnnp_requantize_gemmlowp__sse2(
    size_t n,
    const int32_t* input,
    float scale,
    uint8_t zero_point,
    uint8_t qmin,
    uint8_t qmax,
    uint8_t* output) {
  assert(n % 16 == 0);
  assert(scale < 1.0f);
  assert(scale >= 0x1.0p-32f);
  const uint32_t scale_bits = fp32_to_bits(scale);
  /* Compute requantization parameters */
  /* Multiplier is the float mantissa (with implicit leading 1 restored),
   * left-aligned by 7 bits so its top bit lands in bit 30. */
  const uint32_t multiplier =
      ((scale_bits & UINT32_C(0x007FFFFF)) | UINT32_C(0x00800000)) << 7;
  /* Unbiased float exponent, compensated for the 23 mantissa bits and the
   * 7-bit alignment above. */
  const int32_t exponent = (fp32_to_bits(scale) >> 23) - 127 - 127 - 23 - 7 + 127;
  const int32_t shift =
      -(32 /* using high 32 bits in VQRDMUL */ - 1 /* doubling in VQRDMUL */ +
        exponent);
  const __m128i vmultiplier = _mm_set1_epi32(multiplier);
  const __m128i vzero_point = _mm_set1_epi16((short)(uint16_t)zero_point);
  const __m128i vqmin = _mm_set1_epi8((char)qmin);
  const __m128i vqmax = _mm_set1_epi8((char)qmax);
  for (; n != 0; n -= 16) {
    /* Load 16 accumulators as four 4-lane vectors. */
    const __m128i x = _mm_loadu_si128((const __m128i*)input);
    const __m128i y = _mm_loadu_si128((const __m128i*)(input + 4));
    const __m128i z = _mm_loadu_si128((const __m128i*)(input + 8));
    const __m128i w = _mm_loadu_si128((const __m128i*)(input + 12));
    input += 16;
    /* Saturating rounding doubling multiply-high by the fixed-point scale. */
    const __m128i x_product = gemmlowp_sse_vqrdmulh_s32(x, vmultiplier);
    const __m128i y_product = gemmlowp_sse_vqrdmulh_s32(y, vmultiplier);
    const __m128i z_product = gemmlowp_sse_vqrdmulh_s32(z, vmultiplier);
    const __m128i w_product = gemmlowp_sse_vqrdmulh_s32(w, vmultiplier);
    /* Rounding division by 2^shift. */
    const __m128i x_scaled = gemmlowp_sse_rdivbypo2_s32(x_product, shift);
    const __m128i y_scaled = gemmlowp_sse_rdivbypo2_s32(y_product, shift);
    const __m128i z_scaled = gemmlowp_sse_rdivbypo2_s32(z_product, shift);
    const __m128i w_scaled = gemmlowp_sse_rdivbypo2_s32(w_product, shift);
    /* Pack to int16 with signed saturation and add the zero point
     * (saturating, so overflow cannot wrap). */
    const __m128i xy_packed =
        _mm_adds_epi16(_mm_packs_epi32(x_scaled, y_scaled), vzero_point);
    const __m128i zw_packed =
        _mm_adds_epi16(_mm_packs_epi32(z_scaled, w_scaled), vzero_point);
    /* Pack to uint8 with unsigned saturation, then clamp to [qmin, qmax]. */
    const __m128i xyzw_packed = _mm_packus_epi16(xy_packed, zw_packed);
    const __m128i xyzw_clamped =
        _mm_max_epu8(_mm_min_epu8(xyzw_packed, vqmax), vqmin);
    _mm_storeu_si128((__m128i*)output, xyzw_clamped);
    output += 16;
  }
}
| 2,633
| 34.594595
| 78
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/requantization/gemmlowp-sse4.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <stdint.h>
#include <smmintrin.h>
#include <fp16/bitcasts.h>
#include <qnnpack/requantization-stubs.h>
#include "gemmlowp-sse.h"
/*
 * Requantizes n signed 32-bit accumulators to uint8 using the
 * gemmlowp-compatible scheme, SSE4.1 variant (same algorithm as the SSE2
 * version; the helpers in gemmlowp-sse.h select SSE4.1 instructions when
 * available).
 *
 * scale (must be in [2^-32, 1)) is decomposed into a fixed-point multiplier
 * and a right shift; each input is multiplied via a saturating rounding
 * doubling multiply-high (VQRDMULH-style), rounding-divided by 2^shift,
 * offset by zero_point, and clamped to [qmin, qmax].
 *
 * n must be a multiple of 16; input and output hold n elements each.
 */
void pytorch_qnnp_requantize_gemmlowp__sse4(
    size_t n,
    const int32_t* input,
    float scale,
    uint8_t zero_point,
    uint8_t qmin,
    uint8_t qmax,
    uint8_t* output) {
  assert(n % 16 == 0);
  assert(scale < 1.0f);
  assert(scale >= 0x1.0p-32f);
  const uint32_t scale_bits = fp32_to_bits(scale);
  /* Compute requantization parameters */
  /* Multiplier is the float mantissa (with implicit leading 1 restored),
   * left-aligned by 7 bits so its top bit lands in bit 30. */
  const uint32_t multiplier =
      ((scale_bits & UINT32_C(0x007FFFFF)) | UINT32_C(0x00800000)) << 7;
  const int32_t exponent = (fp32_to_bits(scale) >> 23) - 127 - 23 - 7;
  const int32_t shift =
      -(32 /* using high 32 bits in VQRDMUL */ - 1 /* doubling in VQRDMUL */ +
        exponent);
  const __m128i vmultiplier = _mm_set1_epi32(multiplier);
  const __m128i vzero_point = _mm_set1_epi16((short)(uint16_t)zero_point);
  const __m128i vqmin = _mm_set1_epi8((char)qmin);
  const __m128i vqmax = _mm_set1_epi8((char)qmax);
  for (; n != 0; n -= 16) {
    /* Load 16 accumulators as four 4-lane vectors. */
    const __m128i x = _mm_loadu_si128((const __m128i*)input);
    const __m128i y = _mm_loadu_si128((const __m128i*)(input + 4));
    const __m128i z = _mm_loadu_si128((const __m128i*)(input + 8));
    const __m128i w = _mm_loadu_si128((const __m128i*)(input + 12));
    input += 16;
    /* Saturating rounding doubling multiply-high by the fixed-point scale. */
    const __m128i x_product = gemmlowp_sse_vqrdmulh_s32(x, vmultiplier);
    const __m128i y_product = gemmlowp_sse_vqrdmulh_s32(y, vmultiplier);
    const __m128i z_product = gemmlowp_sse_vqrdmulh_s32(z, vmultiplier);
    const __m128i w_product = gemmlowp_sse_vqrdmulh_s32(w, vmultiplier);
    /* Rounding division by 2^shift. */
    const __m128i x_scaled = gemmlowp_sse_rdivbypo2_s32(x_product, shift);
    const __m128i y_scaled = gemmlowp_sse_rdivbypo2_s32(y_product, shift);
    const __m128i z_scaled = gemmlowp_sse_rdivbypo2_s32(z_product, shift);
    const __m128i w_scaled = gemmlowp_sse_rdivbypo2_s32(w_product, shift);
    /* Pack to int16 with signed saturation and add the zero point
     * (saturating, so overflow cannot wrap). */
    const __m128i xy_packed =
        _mm_adds_epi16(_mm_packs_epi32(x_scaled, y_scaled), vzero_point);
    const __m128i zw_packed =
        _mm_adds_epi16(_mm_packs_epi32(z_scaled, w_scaled), vzero_point);
    /* Pack to uint8 with unsigned saturation, then clamp to [qmin, qmax]. */
    const __m128i xyzw_packed = _mm_packus_epi16(xy_packed, zw_packed);
    const __m128i xyzw_clamped =
        _mm_max_epu8(_mm_min_epu8(xyzw_packed, vqmax), vqmin);
    _mm_storeu_si128((__m128i*)output, xyzw_clamped);
    output += 16;
  }
}
| 2,633
| 34.594595
| 78
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/requantization/gemmlowp-ssse3.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <stdint.h>
#include <tmmintrin.h>
#include <fp16/bitcasts.h>
#include <qnnpack/requantization-stubs.h>
#include "gemmlowp-sse.h"
/*
 * Requantizes n signed 32-bit accumulators to uint8 using the
 * gemmlowp-compatible scheme, SSSE3 variant (same algorithm as the SSE2
 * version; the helpers in gemmlowp-sse.h select SSSE3 instructions when
 * available).
 *
 * scale (must be in [2^-32, 1)) is decomposed into a fixed-point multiplier
 * and a right shift; each input is multiplied via a saturating rounding
 * doubling multiply-high (VQRDMULH-style), rounding-divided by 2^shift,
 * offset by zero_point, and clamped to [qmin, qmax].
 *
 * n must be a multiple of 16; input and output hold n elements each.
 */
void pytorch_qnnp_requantize_gemmlowp__ssse3(
    size_t n,
    const int32_t* input,
    float scale,
    uint8_t zero_point,
    uint8_t qmin,
    uint8_t qmax,
    uint8_t* output) {
  assert(n % 16 == 0);
  assert(scale < 1.0f);
  assert(scale >= 0x1.0p-32f);
  const uint32_t scale_bits = fp32_to_bits(scale);
  /* Compute requantization parameters */
  /* Multiplier is the float mantissa (with implicit leading 1 restored),
   * left-aligned by 7 bits so its top bit lands in bit 30. */
  const uint32_t multiplier =
      ((scale_bits & UINT32_C(0x007FFFFF)) | UINT32_C(0x00800000)) << 7;
  const int32_t exponent = (fp32_to_bits(scale) >> 23) - 127 - 23 - 7;
  const int32_t shift =
      -(32 /* using high 32 bits in VQRDMUL */ - 1 /* doubling in VQRDMUL */ +
        exponent);
  const __m128i vmultiplier = _mm_set1_epi32(multiplier);
  const __m128i vzero_point = _mm_set1_epi16((short)(uint16_t)zero_point);
  const __m128i vqmin = _mm_set1_epi8((char)qmin);
  const __m128i vqmax = _mm_set1_epi8((char)qmax);
  for (; n != 0; n -= 16) {
    /* Load 16 accumulators as four 4-lane vectors. */
    const __m128i x = _mm_loadu_si128((const __m128i*)input);
    const __m128i y = _mm_loadu_si128((const __m128i*)(input + 4));
    const __m128i z = _mm_loadu_si128((const __m128i*)(input + 8));
    const __m128i w = _mm_loadu_si128((const __m128i*)(input + 12));
    input += 16;
    /* Saturating rounding doubling multiply-high by the fixed-point scale. */
    const __m128i x_product = gemmlowp_sse_vqrdmulh_s32(x, vmultiplier);
    const __m128i y_product = gemmlowp_sse_vqrdmulh_s32(y, vmultiplier);
    const __m128i z_product = gemmlowp_sse_vqrdmulh_s32(z, vmultiplier);
    const __m128i w_product = gemmlowp_sse_vqrdmulh_s32(w, vmultiplier);
    /* Rounding division by 2^shift. */
    const __m128i x_scaled = gemmlowp_sse_rdivbypo2_s32(x_product, shift);
    const __m128i y_scaled = gemmlowp_sse_rdivbypo2_s32(y_product, shift);
    const __m128i z_scaled = gemmlowp_sse_rdivbypo2_s32(z_product, shift);
    const __m128i w_scaled = gemmlowp_sse_rdivbypo2_s32(w_product, shift);
    /* Pack to int16 with signed saturation and add the zero point
     * (saturating, so overflow cannot wrap). */
    const __m128i xy_packed =
        _mm_adds_epi16(_mm_packs_epi32(x_scaled, y_scaled), vzero_point);
    const __m128i zw_packed =
        _mm_adds_epi16(_mm_packs_epi32(z_scaled, w_scaled), vzero_point);
    /* Pack to uint8 with unsigned saturation, then clamp to [qmin, qmax]. */
    const __m128i xyzw_packed = _mm_packus_epi16(xy_packed, zw_packed);
    const __m128i xyzw_clamped =
        _mm_max_epu8(_mm_min_epu8(xyzw_packed, vqmax), vqmin);
    _mm_storeu_si128((__m128i*)output, xyzw_clamped);
    output += 16;
  }
}
| 2,634
| 34.608108
| 78
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/requantization/precise-neon.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <stdint.h>
#include <arm_neon.h>
#include <fp16/bitcasts.h>
#include <qnnpack/requantization-stubs.h>
/*
 * Requantizes n signed 32-bit accumulators to uint8 with bit-exact
 * round-to-nearest (ties away from zero), NEON variant.
 *
 * scale (must be in [2^-32, 1)) is decomposed into a 24-bit fixed-point
 * multiplier and a right shift in [24, 56). Each input is widened to a
 * 64-bit product, adjusted so that the subsequent round-to-nearest-ties-up
 * shift (SRSHL/VRSHL with negative shift count) behaves as ties-away-from-
 * zero, then narrowed, offset by zero_point with saturation, and clamped
 * to [qmin, qmax].
 *
 * n must be a multiple of 16; input and output hold n elements each.
 * The #if branches select AArch64 instructions (SMULL2/SADDW2/UZP1/SQXTN2)
 * where available; the AArch32 path uses paired D-register operations.
 */
void pytorch_qnnp_requantize_precise__neon(
    size_t n,
    const int32_t* input,
    float scale,
    uint8_t zero_point,
    uint8_t qmin,
    uint8_t qmax,
    uint8_t* output) {
  assert(n % 16 == 0);
  assert(scale < 1.0f);
  assert(scale >= 0x1.0p-32f);
  const uint32_t scale_bits = fp32_to_bits(scale);
  /* Multiplier is the 24-bit float mantissa with implicit leading 1. */
  const int32_t multiplier =
      ((int32_t)scale_bits & INT32_C(0x007FFFFF)) | INT32_C(0x00800000);
  const int32_t shift = 127 + 23 - (scale_bits >> 23);
  assert(shift >= 24);
  assert(shift < 56);
#if defined(__aarch64__)
  const int32x4_t vmultiplier = vdupq_n_s32(multiplier);
#else
  const int32x2_t vmultiplier = vdup_n_s32(multiplier);
#endif
  const int16x8_t vzero_point = vdupq_n_s16((int16_t)(uint16_t)zero_point);
  /* Negative count: SRSHL/VRSHL with a negative shift performs a rounding
   * right shift. */
  const int64x2_t vshift = vdupq_n_s64(-shift);
  const uint8x16_t vqmin = vdupq_n_u8(qmin);
  const uint8x16_t vqmax = vdupq_n_u8(qmax);
  for (; n != 0; n -= 16) {
    const int32x4_t x = vld1q_s32(input);
    const int32x4_t y = vld1q_s32(input + 4);
    const int32x4_t z = vld1q_s32(input + 8);
    const int32x4_t w = vld1q_s32(input + 12);
    input += 16;
    /* All-ones mask for negative lanes; used below to adjust products so the
     * ties-up rounding shift rounds ties away from zero. */
    const uint32x4_t x_neg_mask = vcltq_s32(x, vmovq_n_s32(0));
    const uint32x4_t y_neg_mask = vcltq_s32(y, vmovq_n_s32(0));
    const uint32x4_t z_neg_mask = vcltq_s32(z, vmovq_n_s32(0));
    const uint32x4_t w_neg_mask = vcltq_s32(w, vmovq_n_s32(0));
/* Full 64-bit products of each 32-bit input with the multiplier. */
#if defined(__aarch64__)
    const int64x2_t x01_product =
        vmull_s32(vget_low_s32(x), vget_low_s32(vmultiplier));
    const int64x2_t x23_product = vmull_high_s32(x, vmultiplier);
    const int64x2_t y01_product =
        vmull_s32(vget_low_s32(y), vget_low_s32(vmultiplier));
    const int64x2_t y23_product = vmull_high_s32(y, vmultiplier);
    const int64x2_t z01_product =
        vmull_s32(vget_low_s32(z), vget_low_s32(vmultiplier));
    const int64x2_t z23_product = vmull_high_s32(z, vmultiplier);
    const int64x2_t w01_product =
        vmull_s32(vget_low_s32(w), vget_low_s32(vmultiplier));
    const int64x2_t w23_product = vmull_high_s32(w, vmultiplier);
#else
    const int64x2_t x01_product = vmull_s32(vget_low_s32(x), vmultiplier);
    const int64x2_t x23_product = vmull_s32(vget_high_s32(x), vmultiplier);
    const int64x2_t y01_product = vmull_s32(vget_low_s32(y), vmultiplier);
    const int64x2_t y23_product = vmull_s32(vget_high_s32(y), vmultiplier);
    const int64x2_t z01_product = vmull_s32(vget_low_s32(z), vmultiplier);
    const int64x2_t z23_product = vmull_s32(vget_high_s32(z), vmultiplier);
    const int64x2_t w01_product = vmull_s32(vget_low_s32(w), vmultiplier);
    const int64x2_t w23_product = vmull_s32(vget_high_s32(w), vmultiplier);
#endif
/* Subtract 1 from products of negative inputs (the mask lanes are -1),
 * converting the rounding shift's ties-up behavior into ties-away-from-zero. */
#if defined(__aarch64__)
    const int64x2_t x01_adjusted_product =
        vaddw_s32(x01_product, vreinterpret_s32_u32(vget_low_u32(x_neg_mask)));
    const int64x2_t x23_adjusted_product =
        vaddw_high_s32(x23_product, vreinterpretq_s32_u32(x_neg_mask));
    const int64x2_t y01_adjusted_product =
        vaddw_s32(y01_product, vreinterpret_s32_u32(vget_low_u32(y_neg_mask)))
    const int64x2_t y23_adjusted_product =
        vaddw_high_s32(y23_product, vreinterpretq_s32_u32(y_neg_mask));
    const int64x2_t z01_adjusted_product =
        vaddw_s32(z01_product, vreinterpret_s32_u32(vget_low_u32(z_neg_mask)));
    const int64x2_t z23_adjusted_product =
        vaddw_high_s32(z23_product, vreinterpretq_s32_u32(z_neg_mask));
    const int64x2_t w01_adjusted_product =
        vaddw_s32(w01_product, vreinterpret_s32_u32(vget_low_u32(w_neg_mask)));
    const int64x2_t w23_adjusted_product =
        vaddw_high_s32(w23_product, vreinterpretq_s32_u32(w_neg_mask));
#else
    const int64x2_t x01_adjusted_product =
        vaddw_s32(x01_product, vreinterpret_s32_u32(vget_low_u32(x_neg_mask)));
    const int64x2_t x23_adjusted_product =
        vaddw_s32(x23_product, vreinterpret_s32_u32(vget_high_u32(x_neg_mask)));
    const int64x2_t y01_adjusted_product =
        vaddw_s32(y01_product, vreinterpret_s32_u32(vget_low_u32(y_neg_mask)));
    const int64x2_t y23_adjusted_product =
        vaddw_s32(y23_product, vreinterpret_s32_u32(vget_high_u32(y_neg_mask)));
    const int64x2_t z01_adjusted_product =
        vaddw_s32(z01_product, vreinterpret_s32_u32(vget_low_u32(z_neg_mask)));
    const int64x2_t z23_adjusted_product =
        vaddw_s32(z23_product, vreinterpret_s32_u32(vget_high_u32(z_neg_mask)));
    const int64x2_t w01_adjusted_product =
        vaddw_s32(w01_product, vreinterpret_s32_u32(vget_low_u32(w_neg_mask)));
    const int64x2_t w23_adjusted_product =
        vaddw_s32(w23_product, vreinterpret_s32_u32(vget_high_u32(w_neg_mask)));
#endif
    /* 64-bit rounding right shift by `shift` (negative count in vshift). */
    const int64x2_t x01_scaled = vrshlq_s64(x01_adjusted_product, vshift);
    const int64x2_t x23_scaled = vrshlq_s64(x23_adjusted_product, vshift);
    const int64x2_t y01_scaled = vrshlq_s64(y01_adjusted_product, vshift);
    const int64x2_t y23_scaled = vrshlq_s64(y23_adjusted_product, vshift);
    const int64x2_t z01_scaled = vrshlq_s64(z01_adjusted_product, vshift);
    const int64x2_t z23_scaled = vrshlq_s64(z23_adjusted_product, vshift);
    const int64x2_t w01_scaled = vrshlq_s64(w01_adjusted_product, vshift);
    const int64x2_t w23_scaled = vrshlq_s64(w23_adjusted_product, vshift);
/* Narrow 64->32, pack 32->16 with saturating zero-point add, then 16->u8
 * with saturation, and clamp to [qmin, qmax]. */
#ifdef __aarch64__
    const int32x4_t x_scaled = vuzp1q_s32(
        vreinterpretq_s32_s64(x01_scaled), vreinterpretq_s32_s64(x23_scaled));
    const int32x4_t y_scaled = vuzp1q_s32(
        vreinterpretq_s32_s64(y01_scaled), vreinterpretq_s32_s64(y23_scaled));
    const int32x4_t z_scaled = vuzp1q_s32(
        vreinterpretq_s32_s64(z01_scaled), vreinterpretq_s32_s64(z23_scaled));
    const int32x4_t w_scaled = vuzp1q_s32(
        vreinterpretq_s32_s64(w01_scaled), vreinterpretq_s32_s64(w23_scaled));
    const int16x8_t xy_packed = vqaddq_s16(
        vqmovn_high_s32(vqmovn_s32(x_scaled), y_scaled), vzero_point);
    const int16x8_t zw_packed = vqaddq_s16(
        vqmovn_high_s32(vqmovn_s32(z_scaled), w_scaled), vzero_point);
    const uint8x16_t xyzw_packed =
        vqmovun_high_s16(vqmovun_s16(xy_packed), zw_packed);
#else
    const int32x4_t x_scaled =
        vcombine_s32(vmovn_s64(x01_scaled), vmovn_s64(x23_scaled));
    const int32x4_t y_scaled =
        vcombine_s32(vmovn_s64(y01_scaled), vmovn_s64(y23_scaled));
    const int32x4_t z_scaled =
        vcombine_s32(vmovn_s64(z01_scaled), vmovn_s64(z23_scaled));
    const int32x4_t w_scaled =
        vcombine_s32(vmovn_s64(w01_scaled), vmovn_s64(w23_scaled));
    const int16x8_t xy_packed = vqaddq_s16(
        vcombine_s16(vqmovn_s32(x_scaled), vqmovn_s32(y_scaled)), vzero_point);
    const int16x8_t zw_packed = vqaddq_s16(
        vcombine_s16(vqmovn_s32(z_scaled), vqmovn_s32(w_scaled)), vzero_point);
    const uint8x16_t xyzw_packed =
        vcombine_u8(vqmovun_s16(xy_packed), vqmovun_s16(zw_packed));
#endif
    const uint8x16_t xyzw_clamped =
        vmaxq_u8(vminq_u8(xyzw_packed, vqmax), vqmin);
    /*
     * AArch32 version:
     *   4x VCLT.S32 Qd, Qm, #0
     *   8x VMULL.S32 Qd, Dm, Dn
     *   8x VADDW.S32 Qd, Qm, Dn
     *   8x VRSHL.S32 Qd, Qm, Qn
     *   8x VMOVN.S64 Dd, Qm
     *   4x VQMOVN.S32 Dd, Qm
     *   2x VADD.S16 Qd, Qm, Qn
     *   2x VQMOVUN.S16 Dd, Qm
     *   1x VMAX.U8 Qd, Qm, Qn
     *   1x VMIN.U8 Qd, Qm, Qn
     * ---------------------
     * 46 instructions total
     *
     * AArch64 version:
     *   4x CMLT Vd.4S, Vn.4S, #0
     *   4x SMULL Vd.2D, Vn.2S, Vm.2S
     *   4x SMULL2 Vd.2D, Vn.4S, Vm.4S
     *   4x SADDW Vd.2D, Vn.2D, Vm.2S
     *   4x SADDW2 Vd.2D, Vn.2D, Vm.4S
     *   8x SRSHL Vd.2D, Vn.2D, Vm.2D
     *   4x UZP1 Vd.4S, Vn.4S, Vm.4S
     *   2x SQXTN Vd.4H, Vn.4S
     *   2x SQXTN2 Vd.8H, Vn.4S
     *   2x ADD Vd.8H, Vn.8H, Vm.8H
     *   1x SQXTUN Vd.8B, Vn.8H
     *   1x SQXTUN2 Vd.16B, Vn.8H
     *   1x UMIN Vd.16B, Vn.16B, Vm.16B
     *   1x UMAX Vd.16B, Vn.16B, Vm.16B
     * ---------------------
     * 42 instructions total
     */
    vst1q_u8(output, xyzw_clamped);
    output += 16;
  }
}
| 8,353
| 40.562189
| 80
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/requantization/precise-psimd.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <stdint.h>
#include <psimd.h>
#include <fp16/bitcasts.h>
#include <qnnpack/requantization-stubs.h>
/*
 * Requantizes n signed 32-bit accumulators to uint8 with bit-exact
 * round-to-nearest (ties away from zero), portable-SIMD (psimd) variant.
 *
 * Because psimd has no 32x32->64 multiply, the 64-bit product of |input|
 * and the 32-bit multiplier is assembled from four 16x16->32 partial
 * products (lo*lo, lo*hi, hi*lo, hi*hi) with explicit carry propagation.
 * The rounded high half is then shifted, the input's sign is restored,
 * and the result is clamped to [qmin, qmax] and offset by zero_point.
 *
 * n must be a multiple of 16; input and output hold n elements each.
 */
void pytorch_qnnp_requantize_precise__psimd(
    size_t n,
    const int32_t* input,
    float scale,
    uint8_t zero_point,
    uint8_t qmin,
    uint8_t qmax,
    uint8_t* output) {
  assert(n % 16 == 0);
  assert(scale < 1.0f);
  assert(scale >= 0x1.0p-32f);
  const uint32_t scale_bits = fp32_to_bits(scale);
  /* 32-bit multiplier: mantissa left-aligned so bit 31 is always set. */
  const uint32_t multiplier = (scale_bits << 8) | UINT32_C(0x80000000);
  const uint32_t shift = 127 + 31 - (scale_bits >> 23);
  assert(shift >= 32);
  assert(shift < 64);
  const uint64_t rounding = UINT64_C(1) << (shift - 1);
  /* Multiplier and rounding split into 16/32-bit halves for the
   * partial-product multiply below. */
  const psimd_u32 vmultiplier_lo =
      psimd_splat_u32(multiplier & UINT32_C(0x0000FFFF));
  const psimd_u32 vmultiplier_hi = psimd_splat_u32(multiplier >> 16);
  const psimd_s32 vzero_point = psimd_splat_s32((int32_t)(uint32_t)zero_point);
  /* Clamp bounds in the zero-point-free domain; zero_point is added last. */
  const psimd_s32 vsmin =
      psimd_splat_s32((int32_t)(uint32_t)qmin - (int32_t)(uint32_t)zero_point);
  const psimd_s32 vsmax =
      psimd_splat_s32((int32_t)(uint32_t)qmax - (int32_t)(uint32_t)zero_point);
  const psimd_u32 vrounding_lo = psimd_splat_u32((uint32_t)rounding);
  const psimd_u32 vrounding_hi = psimd_splat_u32((uint32_t)(rounding >> 32));
  const psimd_u32 vshift = psimd_splat_u32(shift - 32);
  for (; n != 0; n -= 16) {
    const psimd_s32 x = psimd_load_s32(input);
    const psimd_s32 y = psimd_load_s32(input + 4);
    const psimd_s32 z = psimd_load_s32(input + 8);
    const psimd_s32 w = psimd_load_s32(input + 12);
    input += 16;
    /* Arithmetic shift produces an all-ones mask for negative lanes. */
    const psimd_s32 x_neg_mask = x >> psimd_splat_s32(31);
    const psimd_s32 y_neg_mask = y >> psimd_splat_s32(31);
    const psimd_s32 z_neg_mask = z >> psimd_splat_s32(31);
    const psimd_s32 w_neg_mask = w >> psimd_splat_s32(31);
    /* |v| = (v XOR mask) - mask, branch-free absolute value. */
    const psimd_u32 x_abs = (psimd_u32)((x ^ x_neg_mask) - x_neg_mask);
    const psimd_u32 y_abs = (psimd_u32)((y ^ y_neg_mask) - y_neg_mask);
    const psimd_u32 z_abs = (psimd_u32)((z ^ z_neg_mask) - z_neg_mask);
    const psimd_u32 w_abs = (psimd_u32)((w ^ w_neg_mask) - w_neg_mask);
    /* Split each absolute value into 16-bit halves. */
    const psimd_u32 x_abs_lo = x_abs & psimd_splat_u32(UINT32_C(0x0000FFFF));
    const psimd_u32 x_abs_hi = x_abs >> psimd_splat_u32(16);
    const psimd_u32 y_abs_lo = y_abs & psimd_splat_u32(UINT32_C(0x0000FFFF));
    const psimd_u32 y_abs_hi = y_abs >> psimd_splat_u32(16);
    const psimd_u32 z_abs_lo = z_abs & psimd_splat_u32(UINT32_C(0x0000FFFF));
    const psimd_u32 z_abs_hi = z_abs >> psimd_splat_u32(16);
    const psimd_u32 w_abs_lo = w_abs & psimd_splat_u32(UINT32_C(0x0000FFFF));
    const psimd_u32 w_abs_hi = w_abs >> psimd_splat_u32(16);
    /* Four 16x16 partial products per lane, carries folded as we go. */
    const psimd_u32 x_product_ll = x_abs_lo * vmultiplier_lo;
    const psimd_u32 y_product_ll = y_abs_lo * vmultiplier_lo;
    const psimd_u32 z_product_ll = z_abs_lo * vmultiplier_lo;
    const psimd_u32 w_product_ll = w_abs_lo * vmultiplier_lo;
    const psimd_u32 x_product_lh =
        x_abs_lo * vmultiplier_hi + (x_product_ll >> psimd_splat_u32(16));
    const psimd_u32 y_product_lh =
        y_abs_lo * vmultiplier_hi + (y_product_ll >> psimd_splat_u32(16));
    const psimd_u32 z_product_lh =
        z_abs_lo * vmultiplier_hi + (z_product_ll >> psimd_splat_u32(16));
    const psimd_u32 w_product_lh =
        w_abs_lo * vmultiplier_hi + (w_product_ll >> psimd_splat_u32(16));
    const psimd_u32 x_product_hl = x_abs_hi * vmultiplier_lo +
        (x_product_lh & psimd_splat_u32(UINT32_C(0x0000FFFF)));
    const psimd_u32 y_product_hl = y_abs_hi * vmultiplier_lo +
        (y_product_lh & psimd_splat_u32(UINT32_C(0x0000FFFF)));
    const psimd_u32 z_product_hl = z_abs_hi * vmultiplier_lo +
        (z_product_lh & psimd_splat_u32(UINT32_C(0x0000FFFF)));
    const psimd_u32 w_product_hl = w_abs_hi * vmultiplier_lo +
        (w_product_lh & psimd_splat_u32(UINT32_C(0x0000FFFF)));
    /* Reassemble low and high 32-bit words of the 64-bit product. */
    const psimd_u32 x_product_lo = (x_product_hl << psimd_splat_u32(16)) +
        (x_product_ll & psimd_splat_u32(UINT32_C(0x0000FFFF)));
    const psimd_u32 y_product_lo = (y_product_hl << psimd_splat_u32(16)) +
        (y_product_ll & psimd_splat_u32(UINT32_C(0x0000FFFF)));
    const psimd_u32 z_product_lo = (z_product_hl << psimd_splat_u32(16)) +
        (z_product_ll & psimd_splat_u32(UINT32_C(0x0000FFFF)));
    const psimd_u32 w_product_lo = (w_product_hl << psimd_splat_u32(16)) +
        (w_product_ll & psimd_splat_u32(UINT32_C(0x0000FFFF)));
    const psimd_u32 x_product_hi = x_abs_hi * vmultiplier_hi +
        (x_product_lh >> psimd_splat_u32(16)) +
        (x_product_hl >> psimd_splat_u32(16));
    const psimd_u32 y_product_hi = y_abs_hi * vmultiplier_hi +
        (y_product_lh >> psimd_splat_u32(16)) +
        (y_product_hl >> psimd_splat_u32(16));
    const psimd_u32 z_product_hi = z_abs_hi * vmultiplier_hi +
        (z_product_lh >> psimd_splat_u32(16)) +
        (z_product_hl >> psimd_splat_u32(16));
    const psimd_u32 w_product_hi = w_abs_hi * vmultiplier_hi +
        (w_product_lh >> psimd_splat_u32(16)) +
        (w_product_hl >> psimd_splat_u32(16));
    /* Add the rounding constant: high word added directly; the low-word
     * carry is reconstructed from the sign of (product_lo & rounding_lo)
     * (the subtraction adds 1 because the mask is 0 or -1). */
    const psimd_u32 x_adjusted_product = (x_product_hi + vrounding_hi) -
        ((psimd_s32)(x_product_lo & vrounding_lo) >> psimd_splat_s32(31));
    const psimd_u32 y_adjusted_product = (y_product_hi + vrounding_hi) -
        ((psimd_s32)(y_product_lo & vrounding_lo) >> psimd_splat_s32(31));
    const psimd_u32 z_adjusted_product = (z_product_hi + vrounding_hi) -
        ((psimd_s32)(z_product_lo & vrounding_lo) >> psimd_splat_s32(31));
    const psimd_u32 w_adjusted_product = (w_product_hi + vrounding_hi) -
        ((psimd_s32)(w_product_lo & vrounding_lo) >> psimd_splat_s32(31));
    /* Remaining shift after the implicit >>32 of taking the high word. */
    const psimd_u32 x_abs_scaled = x_adjusted_product >> vshift;
    const psimd_u32 y_abs_scaled = y_adjusted_product >> vshift;
    const psimd_u32 z_abs_scaled = z_adjusted_product >> vshift;
    const psimd_u32 w_abs_scaled = w_adjusted_product >> vshift;
    /* Restore the original sign of each lane. */
    const psimd_s32 x_scaled =
        (psimd_s32)(x_abs_scaled ^ x_neg_mask) - x_neg_mask;
    const psimd_s32 y_scaled =
        (psimd_s32)(y_abs_scaled ^ y_neg_mask) - y_neg_mask;
    const psimd_s32 z_scaled =
        (psimd_s32)(z_abs_scaled ^ z_neg_mask) - z_neg_mask;
    const psimd_s32 w_scaled =
        (psimd_s32)(w_abs_scaled ^ w_neg_mask) - w_neg_mask;
    /* Clamp before adding zero_point to avoid signed overflow. */
    const psimd_u32 x_clamped =
        (psimd_u32)psimd_max_s32(psimd_min_s32(x_scaled, vsmax), vsmin) +
        vzero_point;
    const psimd_u32 y_clamped =
        (psimd_u32)psimd_max_s32(psimd_min_s32(y_scaled, vsmax), vsmin) +
        vzero_point;
    const psimd_u32 z_clamped =
        (psimd_u32)psimd_max_s32(psimd_min_s32(z_scaled, vsmax), vsmin) +
        vzero_point;
    const psimd_u32 w_clamped =
        (psimd_u32)psimd_max_s32(psimd_min_s32(w_scaled, vsmax), vsmin) +
        vzero_point;
    /* Narrow 32 -> 16 -> 8 bits by taking even elements (values already fit
     * in 8 bits after clamping). */
    const psimd_u16 xy_clamped =
        psimd_concat_even_u16((psimd_u16)x_clamped, (psimd_u16)y_clamped);
    const psimd_u16 zw_clamped =
        psimd_concat_even_u16((psimd_u16)z_clamped, (psimd_u16)w_clamped);
    const psimd_u8 xyzw_clamped =
        psimd_concat_even_u8((psimd_u8)xy_clamped, (psimd_u8)zw_clamped);
    psimd_store_u8(output, xyzw_clamped);
    output += 16;
  }
}
| 7,397
| 43.566265
| 79
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/requantization/precise-scalar.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <stdint.h>
#include <fp16/bitcasts.h>
#include <qnnpack/requantization-stubs.h>
#include <qnnpack/scalar-utils.h>
/*
 * Requantizes n signed 32-bit accumulators to uint8 with bit-exact
 * round-to-nearest (ties away from zero), using only 32-bit unsigned
 * arithmetic plus a 32x32->64 multiply.
 *
 * scale (in [2^-32, 1)) is decomposed into a 32-bit fixed-point multiplier
 * (mantissa left-aligned so bit 31 is set) and a shift in [32, 64). For each
 * value: take the absolute value, form the 64-bit product with the
 * multiplier, add the rounding constant, and shift right. The 64-bit
 * rounding add is emulated in 32-bit pieces: the carry out of the low word
 * exists exactly when both the low product word and the low rounding word
 * have bit 31 set (rounding is a single power of two), which is tested as a
 * signed comparison against zero. The >>32 part of the shift is taking the
 * high product word; the remainder is a 32-bit shift. Finally the sign is
 * restored, the value is clamped to [qmin - zero_point, qmax - zero_point],
 * and zero_point is added (after clamping, so no signed overflow).
 *
 * n must be a multiple of 4; input and output hold n elements each.
 */
void pytorch_qnnp_requantize_precise__scalar_unsigned32(
    size_t n,
    const int32_t* input,
    float scale,
    uint8_t zero_point,
    uint8_t qmin,
    uint8_t qmax,
    uint8_t* output) {
  assert(n % 4 == 0);
  assert(scale < 1.0f);
  assert(scale >= 0x1.0p-32f);
  const uint32_t scale_bits = fp32_to_bits(scale);
  const uint32_t multiplier = (scale_bits << 8) | UINT32_C(0x80000000);
  const uint32_t shift = 127 + 31 - (scale_bits >> 23);
  assert(shift >= 32);
  assert(shift < 64);
  const uint64_t rounding = UINT64_C(1) << (shift - 1);
  const uint32_t rounding_hi = (uint32_t)(rounding >> 32);
  const uint32_t rounding_lo = (uint32_t)rounding;
  const uint32_t shift_minus_32 = shift - 32;
  /* Clamp bounds with the zero point pre-subtracted. */
  const int32_t smin = (int32_t)(uint32_t)qmin - (int32_t)(uint32_t)zero_point;
  const int32_t smax = (int32_t)(uint32_t)qmax - (int32_t)(uint32_t)zero_point;
  for (; n != 0; n -= 4) {
    for (size_t lane = 0; lane < 4; lane++) {
      const int32_t value = input[lane];
      /* Unsigned absolute value: negation of an unsigned copy avoids UB on
       * INT32_MIN. */
      const uint32_t abs_value =
          (value >= 0) ? (uint32_t)value : -(uint32_t)value;
      /* Full 64-bit product of the 32-bit factors. */
      const uint64_t product = (uint64_t)abs_value * (uint64_t)multiplier;
      /* Carry out of the low 32 bits of (product + rounding): set exactly
       * when bit 31 is set in both low words. */
      const uint32_t carry =
          (uint32_t)((int32_t)((uint32_t)product & rounding_lo) < 0);
      const uint32_t product_hi = (uint32_t)(product >> 32);
      /* High word of (product + rounding), then the residual shift. */
      const uint32_t abs_scaled =
          (uint32_t)(product_hi + rounding_hi + carry) >> shift_minus_32;
      /* Reapply the input's sign. */
      const int32_t scaled = (int32_t)(value >= 0 ? abs_scaled : -abs_scaled);
      /* Clamp, then bias by zero_point; the sum is guaranteed to be in
       * [qmin, qmax]. */
      const int32_t clamped =
          scaled < smin ? smin : scaled > smax ? smax : scaled;
      output[lane] = (uint8_t)(clamped + zero_point);
    }
    input += 4;
    output += 4;
  }
}
/*
 * Requantizes n signed 32-bit accumulators to uint8 with bit-exact
 * round-to-nearest (ties away from zero), using unsigned 64-bit arithmetic.
 *
 * scale (in [2^-32, 1)) is decomposed into a 24-bit fixed-point multiplier
 * (the float mantissa with its implicit leading 1) and a shift in [24, 56).
 * For each value: take the absolute value, form the 64-bit product with the
 * multiplier, add the precomputed rounding constant 2^(shift-1), and shift
 * right by `shift` (on targets with a rounding-right-shift instruction this
 * maps to one op, e.g. URSHL on ARM64). The sign is then restored, the value
 * clamped to [qmin - zero_point, qmax - zero_point], and zero_point added
 * last so the 32-bit addition cannot overflow.
 *
 * n must be a multiple of 4; input and output hold n elements each.
 */
void pytorch_qnnp_requantize_precise__scalar_unsigned64(
    size_t n,
    const int32_t* input,
    float scale,
    uint8_t zero_point,
    uint8_t qmin,
    uint8_t qmax,
    uint8_t* output) {
  assert(n % 4 == 0);
  assert(scale < 1.0f);
  assert(scale >= 0x1.0p-32f);
  const uint32_t scale_bits = fp32_to_bits(scale);
  const uint32_t multiplier =
      (scale_bits & UINT32_C(0x007FFFFF)) | UINT32_C(0x00800000);
  const uint32_t shift = 127 + 23 - (scale_bits >> 23);
  assert(shift >= 24);
  assert(shift < 56);
  const uint64_t rounding = UINT64_C(1) << (shift - 1);
  /* Clamp bounds with the zero point pre-subtracted. */
  const int32_t smin = (int32_t)(uint32_t)qmin - (int32_t)(uint32_t)zero_point;
  const int32_t smax = (int32_t)(uint32_t)qmax - (int32_t)(uint32_t)zero_point;
  for (; n != 0; n -= 4) {
    for (size_t lane = 0; lane < 4; lane++) {
      const int32_t value = input[lane];
      /* Unsigned absolute value: negation of an unsigned copy avoids UB on
       * INT32_MIN. */
      const uint32_t abs_value =
          (value >= 0) ? (uint32_t)value : -(uint32_t)value;
      /* Full 64-bit product, rounded right shift by `shift`. */
      const uint64_t product = (uint64_t)abs_value * (uint64_t)multiplier;
      const uint32_t abs_scaled = (uint32_t)((product + rounding) >> shift);
      /* Reapply the input's sign (maps to PSIGND on SSSE3 targets). */
      const int32_t scaled = (int32_t)(value >= 0 ? abs_scaled : -abs_scaled);
      /* Clamp, then bias by zero_point; the sum is guaranteed to be in
       * [qmin, qmax]. */
      const int32_t clamped =
          scaled < smin ? smin : scaled > smax ? smax : scaled;
      output[lane] = (uint8_t)(clamped + zero_point);
    }
    input += 4;
    output += 4;
  }
}
/*
 * Precise requantization of 32-bit accumulators to uint8, scalar variant
 * built on signed 64-bit arithmetic:
 *   output = clamp(round(input * scale) + zero_point, qmin, qmax)
 * with rounding half away from zero. Processes values in groups of 4.
 *
 * @param n          number of elements; must be a multiple of 4.
 * @param input      pointer to n signed 32-bit accumulators.
 * @param scale      requantization scale in [2**-32, 1).
 * @param zero_point output zero point.
 * @param qmin,qmax  inclusive clamping bounds for the output.
 * @param output     pointer to n output bytes.
 */
void pytorch_qnnp_requantize_precise__scalar_signed64(
    size_t n,
    const int32_t* input,
    float scale,
    uint8_t zero_point,
    uint8_t qmin,
    uint8_t qmax,
    uint8_t* output) {
  assert(n % 4 == 0);
  assert(scale < 1.0f);
  assert(scale >= 0x1.0p-32f);

  /* Split the FP32 scale into a 24-bit mantissa multiplier and a shift. */
  const uint32_t scale_bits = fp32_to_bits(scale);
  const int32_t multiplier =
      ((int32_t)scale_bits & INT32_C(0x007FFFFF)) | INT32_C(0x00800000);
  const uint32_t shift = 127 + 23 - (scale_bits >> 23);
  assert(shift >= 24);
  assert(shift < 56);

  /* Pre-added rounding term 2**(shift-1): round-half-up after the shift. */
  const int64_t rounding = INT64_C(1) << (shift - 1);
  /* Clamping bounds expressed relative to the zero point. */
  const int32_t smin = (int32_t)(uint32_t)qmin - (int32_t)(uint32_t)zero_point;
  const int32_t smax = (int32_t)(uint32_t)qmax - (int32_t)(uint32_t)zero_point;

  for (; n != 0; n -= 4) {
    for (size_t i = 0; i < 4; i++) {
      const int32_t value = input[i];

      /*
       * Full 64-bit product of signed 32-bit factors.
       * Note: multiplier can be treated as either signed or unsigned.
       */
      const int64_t product = (int64_t)value * (int64_t)multiplier;

      /*
       * Subtract 1 from negative products so that the subsequent
       * round-half-up shift behaves as round-half-away-from-zero.
       */
      const int64_t adjusted_product = product - (int64_t)(value < 0);

      /*
       * Arithmetic 64-bit right shift with rounding towards the closest
       * integer (midpoints away from zero after the adjustment above).
       * On ISAs with a "right shift with rounding" instruction this maps
       * to a single instruction (e.g. VRSHL.S64 / SRSHL on ARM).
       */
      const int32_t scaled =
          (int32_t)asr_s64(adjusted_product + rounding, shift);

      /*
       * Clamp in the zero-point-relative domain: adding the zero point
       * first could overflow int32, since scaled values span almost the
       * whole 32-bit range.
       */
      const int32_t clamped =
          scaled < smin ? smin : scaled > smax ? smax : scaled;

      /* Re-add the zero point; the result is guaranteed in [qmin, qmax]. */
      output[i] = (uint8_t)(clamped + zero_point);
    }
    input += 4;
    output += 4;
  }
}
| 15,091
| 40.234973
| 81
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/requantization/precise-sse2.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <stdint.h>
#include <emmintrin.h>
#include <fp16/bitcasts.h>
#include <qnnpack/requantization-stubs.h>
/*
 * Precise requantization of 32-bit accumulators to uint8, SSE2 version:
 *   output = clamp(round(input * scale) + zero_point, qmin, qmax)
 * with rounding half away from zero. Processes 16 values per iteration.
 *
 * SSE2 has neither a signed 32x32->64 multiply nor a 32-bit absolute value,
 * so the product is computed on absolute values via PMULUDQ and the sign is
 * restored afterwards with the (v ^ mask) - mask trick.
 *
 * @param n          number of elements; must be a multiple of 16.
 * @param input      pointer to n signed 32-bit accumulators.
 * @param scale      requantization scale in [2**-32, 1).
 * @param zero_point output zero point.
 * @param qmin,qmax  inclusive clamping bounds for the output.
 * @param output     pointer to n output bytes.
 */
void pytorch_qnnp_requantize_precise__sse2(
    size_t n,
    const int32_t* input,
    float scale,
    uint8_t zero_point,
    uint8_t qmin,
    uint8_t qmax,
    uint8_t* output) {
  assert(n % 16 == 0);
  assert(scale < 1.0f);
  assert(scale >= 0x1.0p-32f);
  /* Split the FP32 scale into a 24-bit mantissa multiplier and a shift. */
  const uint32_t scale_bits = fp32_to_bits(scale);
  const uint32_t multiplier =
      (scale_bits & UINT32_C(0x007FFFFF)) | UINT32_C(0x00800000);
  const uint32_t shift = 127 + 23 - (scale_bits >> 23);
  assert(shift >= 24);
  assert(shift < 56);
  /* Pre-added rounding term 2**(shift-1): round-half-up after the shift. */
  const uint64_t rounding = UINT64_C(1) << (shift - 1);
  const __m128i vmultiplier = _mm_set1_epi32(multiplier);
  const __m128i vzero_point = _mm_set1_epi16((short)(uint16_t)zero_point);
  const __m128i vqmin = _mm_set1_epi8((char)qmin);
  const __m128i vqmax = _mm_set1_epi8((char)qmax);
  const __m128i vshift = _mm_cvtsi32_si128((int)shift);
  const __m128i vrounding = _mm_set1_epi64x(rounding);
  for (; n != 0; n -= 16) {
    const __m128i x = _mm_loadu_si128((const __m128i*)input);
    const __m128i y = _mm_loadu_si128((const __m128i*)(input + 4));
    const __m128i z = _mm_loadu_si128((const __m128i*)(input + 8));
    const __m128i w = _mm_loadu_si128((const __m128i*)(input + 12));
    input += 16;
    /* Per-lane all-ones mask where the input is negative. */
    const __m128i x_neg_mask = _mm_cmpgt_epi32(_mm_setzero_si128(), x);
    const __m128i y_neg_mask = _mm_cmpgt_epi32(_mm_setzero_si128(), y);
    const __m128i z_neg_mask = _mm_cmpgt_epi32(_mm_setzero_si128(), z);
    const __m128i w_neg_mask = _mm_cmpgt_epi32(_mm_setzero_si128(), w);
    /* |v| = (v ^ mask) - mask (two's complement negate under the mask). */
    const __m128i x_abs0123 =
        _mm_sub_epi32(_mm_xor_si128(x, x_neg_mask), x_neg_mask);
    const __m128i y_abs0123 =
        _mm_sub_epi32(_mm_xor_si128(y, y_neg_mask), y_neg_mask);
    const __m128i z_abs0123 =
        _mm_sub_epi32(_mm_xor_si128(z, z_neg_mask), z_neg_mask);
    const __m128i w_abs0123 =
        _mm_sub_epi32(_mm_xor_si128(w, w_neg_mask), w_neg_mask);
    /* Swap adjacent lanes so PMULUDQ can also see the odd elements. */
    const __m128i x_abs1032 =
        _mm_shuffle_epi32(x_abs0123, _MM_SHUFFLE(2, 3, 0, 1));
    const __m128i y_abs1032 =
        _mm_shuffle_epi32(y_abs0123, _MM_SHUFFLE(2, 3, 0, 1));
    const __m128i z_abs1032 =
        _mm_shuffle_epi32(z_abs0123, _MM_SHUFFLE(2, 3, 0, 1));
    const __m128i w_abs1032 =
        _mm_shuffle_epi32(w_abs0123, _MM_SHUFFLE(2, 3, 0, 1));
    /* PMULUDQ: unsigned 32x32->64 products of even-indexed lanes. */
    const __m128i x_absmul02 = _mm_mul_epu32(x_abs0123, vmultiplier);
    const __m128i y_absmul02 = _mm_mul_epu32(y_abs0123, vmultiplier);
    const __m128i z_absmul02 = _mm_mul_epu32(z_abs0123, vmultiplier);
    const __m128i w_absmul02 = _mm_mul_epu32(w_abs0123, vmultiplier);
    const __m128i x_absmul13 = _mm_mul_epu32(x_abs1032, vmultiplier);
    const __m128i y_absmul13 = _mm_mul_epu32(y_abs1032, vmultiplier);
    const __m128i z_absmul13 = _mm_mul_epu32(z_abs1032, vmultiplier);
    const __m128i w_absmul13 = _mm_mul_epu32(w_abs1032, vmultiplier);
    /* Add the rounding term, then logical 64-bit shift right. */
    const __m128i x_abs_scaled02 =
        _mm_srl_epi64(_mm_add_epi64(x_absmul02, vrounding), vshift);
    const __m128i x_abs_scaled13 =
        _mm_srl_epi64(_mm_add_epi64(x_absmul13, vrounding), vshift);
    const __m128i y_abs_scaled02 =
        _mm_srl_epi64(_mm_add_epi64(y_absmul02, vrounding), vshift);
    const __m128i y_abs_scaled13 =
        _mm_srl_epi64(_mm_add_epi64(y_absmul13, vrounding), vshift);
    const __m128i z_abs_scaled02 =
        _mm_srl_epi64(_mm_add_epi64(z_absmul02, vrounding), vshift);
    const __m128i z_abs_scaled13 =
        _mm_srl_epi64(_mm_add_epi64(z_absmul13, vrounding), vshift);
    const __m128i w_abs_scaled02 =
        _mm_srl_epi64(_mm_add_epi64(w_absmul02, vrounding), vshift);
    const __m128i w_abs_scaled13 =
        _mm_srl_epi64(_mm_add_epi64(w_absmul13, vrounding), vshift);
    /* Gather the low 32 bits of the four 64-bit results back into one
     * vector (order 0,2,1,3), then shuffle to restore lane order 0,1,2,3. */
    const __m128i x_abs_scaled0213 = _mm_castps_si128(_mm_shuffle_ps(
        _mm_castsi128_ps(x_abs_scaled02),
        _mm_castsi128_ps(x_abs_scaled13),
        _MM_SHUFFLE(2, 0, 2, 0)));
    const __m128i y_abs_scaled0213 = _mm_castps_si128(_mm_shuffle_ps(
        _mm_castsi128_ps(y_abs_scaled02),
        _mm_castsi128_ps(y_abs_scaled13),
        _MM_SHUFFLE(2, 0, 2, 0)));
    const __m128i z_abs_scaled0213 = _mm_castps_si128(_mm_shuffle_ps(
        _mm_castsi128_ps(z_abs_scaled02),
        _mm_castsi128_ps(z_abs_scaled13),
        _MM_SHUFFLE(2, 0, 2, 0)))
;
    const __m128i w_abs_scaled0213 = _mm_castps_si128(_mm_shuffle_ps(
        _mm_castsi128_ps(w_abs_scaled02),
        _mm_castsi128_ps(w_abs_scaled13),
        _MM_SHUFFLE(2, 0, 2, 0)));
    const __m128i x_abs_scaled =
        _mm_shuffle_epi32(x_abs_scaled0213, _MM_SHUFFLE(3, 1, 2, 0));
    const __m128i y_abs_scaled =
        _mm_shuffle_epi32(y_abs_scaled0213, _MM_SHUFFLE(3, 1, 2, 0));
    const __m128i z_abs_scaled =
        _mm_shuffle_epi32(z_abs_scaled0213, _MM_SHUFFLE(3, 1, 2, 0));
    const __m128i w_abs_scaled =
        _mm_shuffle_epi32(w_abs_scaled0213, _MM_SHUFFLE(3, 1, 2, 0));
    /* Restore the sign of each scaled value from the original input. */
    const __m128i x_scaled =
        _mm_sub_epi32(_mm_xor_si128(x_abs_scaled, x_neg_mask), x_neg_mask);
    const __m128i y_scaled =
        _mm_sub_epi32(_mm_xor_si128(y_abs_scaled, y_neg_mask), y_neg_mask);
    const __m128i z_scaled =
        _mm_sub_epi32(_mm_xor_si128(z_abs_scaled, z_neg_mask), z_neg_mask);
    const __m128i w_scaled =
        _mm_sub_epi32(_mm_xor_si128(w_abs_scaled, w_neg_mask), w_neg_mask);
    /* Pack to int16 with signed saturation, add zero point with saturating
     * add, then pack to uint8 with unsigned saturation. */
    const __m128i xy_packed =
        _mm_adds_epi16(_mm_packs_epi32(x_scaled, y_scaled), vzero_point);
    const __m128i zw_packed =
        _mm_adds_epi16(_mm_packs_epi32(z_scaled, w_scaled), vzero_point);
    const __m128i xyzw_packed = _mm_packus_epi16(xy_packed, zw_packed);
    /* Final clamp to the caller-specified [qmin, qmax] range. */
    const __m128i xyzw_clamped =
        _mm_max_epu8(_mm_min_epu8(xyzw_packed, vqmax), vqmin);
    /*
     * 4x PXOR (setzero)
     * 8x PSUBD
     * 8x PXOR
     * 8x PSHUFD
     * 8x PMULUDQ
     * 8x PSRLQ
     * 8x PADDQ
     * 4x SHUFPS
     * 2x PACKSSDW
     * 1x PACKUSWB
     * 2x PADDW
     * 1x PMAXUB
     * 1x PMINUB
     * ---------------------
     * 63 instructions total
     */
    _mm_storeu_si128((__m128i*)output, xyzw_clamped);
    output += 16;
  }
}
| 6,367
| 37.593939
| 75
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/requantization/precise-sse4.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <stdint.h>
#include <smmintrin.h>
#include <fp16/bitcasts.h>
#include <qnnpack/requantization-stubs.h>
/*
 * Precise requantization of 32-bit accumulators to uint8, SSE4.1 version:
 *   output = clamp(round(input * scale) + zero_point, qmin, qmax)
 * with rounding half away from zero. Processes 16 values per iteration.
 *
 * Compared to the SSE2 variant, PABSD/PSIGND (SSSE3) replace the
 * mask-based abs/sign restoration and PBLENDW (SSE4.1) replaces the
 * SHUFPS lane merging.
 *
 * @param n          number of elements; must be a multiple of 16.
 * @param input      pointer to n signed 32-bit accumulators.
 * @param scale      requantization scale in [2**-32, 1).
 * @param zero_point output zero point.
 * @param qmin,qmax  inclusive clamping bounds for the output.
 * @param output     pointer to n output bytes.
 */
void pytorch_qnnp_requantize_precise__sse4(
    size_t n,
    const int32_t* input,
    float scale,
    uint8_t zero_point,
    uint8_t qmin,
    uint8_t qmax,
    uint8_t* output) {
  assert(n % 16 == 0);
  assert(scale < 1.0f);
  assert(scale >= 0x1.0p-32f);
  /* 32-bit multiplier: mantissa in the top bits with the implicit leading
   * one at bit 31; shift is correspondingly in the [32, 64) range. */
  const uint32_t scale_bits = fp32_to_bits(scale);
  const uint32_t multiplier = (scale_bits << 8) | UINT32_C(0x80000000);
  const uint32_t shift = 127 + 31 - (scale_bits >> 23);
  assert(shift >= 32);
  assert(shift < 64);
  /* Pre-added rounding term 2**(shift-1): round-half-up after the shift. */
  const uint64_t rounding = UINT64_C(1) << (shift - 1);
  const __m128i vmultiplier = _mm_set1_epi32(multiplier);
  const __m128i vzero_point = _mm_set1_epi16((short)(uint16_t)zero_point);
  const __m128i vqmin = _mm_set1_epi8((char)qmin);
  const __m128i vqmax = _mm_set1_epi8((char)qmax);
  /* Even lanes use a full 64-bit shift; odd lanes shift the high 32-bit
   * word by (shift - 32), which is equivalent because shift >= 32. */
  const __m128i vshiftlo = _mm_cvtsi32_si128((int)shift);
  const __m128i vshifthi = _mm_cvtsi32_si128((int)shift - 32);
  const __m128i vrounding = _mm_set1_epi64x(rounding);
  for (; n != 0; n -= 16) {
    const __m128i x = _mm_loadu_si128((const __m128i*)input);
    const __m128i y = _mm_loadu_si128((const __m128i*)(input + 4));
    const __m128i z = _mm_loadu_si128((const __m128i*)(input + 8));
    const __m128i w = _mm_loadu_si128((const __m128i*)(input + 12));
    input += 16;
    /* SSSE3 PABSD: per-lane absolute value. */
    const __m128i x_abs0123 = _mm_abs_epi32(x);
    const __m128i y_abs0123 = _mm_abs_epi32(y);
    const __m128i z_abs0123 = _mm_abs_epi32(z);
    const __m128i w_abs0123 = _mm_abs_epi32(w);
    /* Swap adjacent lanes so PMULUDQ can also see the odd elements. */
    const __m128i x_abs1032 =
        _mm_shuffle_epi32(x_abs0123, _MM_SHUFFLE(2, 3, 0, 1));
    const __m128i y_abs1032 =
        _mm_shuffle_epi32(y_abs0123, _MM_SHUFFLE(2, 3, 0, 1));
    const __m128i z_abs1032 =
        _mm_shuffle_epi32(z_abs0123, _MM_SHUFFLE(2, 3, 0, 1));
    const __m128i w_abs1032 =
        _mm_shuffle_epi32(w_abs0123, _MM_SHUFFLE(2, 3, 0, 1));
    /* PMULUDQ: unsigned 32x32->64 products of even-indexed lanes. */
    const __m128i x_absmul02 = _mm_mul_epu32(x_abs0123, vmultiplier);
    const __m128i y_absmul02 = _mm_mul_epu32(y_abs0123, vmultiplier);
    const __m128i z_absmul02 = _mm_mul_epu32(z_abs0123, vmultiplier);
    const __m128i w_absmul02 = _mm_mul_epu32(w_abs0123, vmultiplier);
    const __m128i x_absmul13 = _mm_mul_epu32(x_abs1032, vmultiplier);
    const __m128i y_absmul13 = _mm_mul_epu32(y_abs1032, vmultiplier);
    const __m128i z_absmul13 = _mm_mul_epu32(z_abs1032, vmultiplier);
    const __m128i w_absmul13 = _mm_mul_epu32(w_abs1032, vmultiplier);
    /* Even lanes: 64-bit logical shift leaves the result in bits 0-31.
     * Odd lanes: 32-bit shift by (shift - 32) leaves the result in the
     * high word (bits 32-63), exactly where PBLENDW below expects it. */
    const __m128i x_abs_scaled02 =
        _mm_srl_epi64(_mm_add_epi64(x_absmul02, vrounding), vshiftlo);
    const __m128i x_abs_scaled13 =
        _mm_srl_epi32(_mm_add_epi64(x_absmul13, vrounding), vshifthi);
    const __m128i y_abs_scaled02 =
        _mm_srl_epi64(_mm_add_epi64(y_absmul02, vrounding), vshiftlo);
    const __m128i y_abs_scaled13 =
        _mm_srl_epi32(_mm_add_epi64(y_absmul13, vrounding), vshifthi);
    const __m128i z_abs_scaled02 =
        _mm_srl_epi64(_mm_add_epi64(z_absmul02, vrounding), vshiftlo);
    const __m128i z_abs_scaled13 =
        _mm_srl_epi32(_mm_add_epi64(z_absmul13, vrounding), vshifthi);
    const __m128i w_abs_scaled02 =
        _mm_srl_epi64(_mm_add_epi64(w_absmul02, vrounding), vshiftlo);
    const __m128i w_abs_scaled13 =
        _mm_srl_epi32(_mm_add_epi64(w_absmul13, vrounding), vshifthi);
    /* Blend even results (lanes 0, 2) with odd results (lanes 1, 3). */
    const __m128i x_abs_scaled =
        _mm_blend_epi16(x_abs_scaled02, x_abs_scaled13, 0xCC);
    const __m128i y_abs_scaled =
        _mm_blend_epi16(y_abs_scaled02, y_abs_scaled13, 0xCC);
    const __m128i z_abs_scaled =
        _mm_blend_epi16(z_abs_scaled02, z_abs_scaled13, 0xCC);
    const __m128i w_abs_scaled =
        _mm_blend_epi16(w_abs_scaled02, w_abs_scaled13, 0xCC);
    /* SSSE3 PSIGND: restore the sign from the original input. */
    const __m128i x_scaled = _mm_sign_epi32(x_abs_scaled, x);
    const __m128i y_scaled = _mm_sign_epi32(y_abs_scaled, y);
    const __m128i z_scaled = _mm_sign_epi32(z_abs_scaled, z);
    const __m128i w_scaled = _mm_sign_epi32(w_abs_scaled, w);
    /* Pack to int16 with saturation, add zero point, pack to uint8. */
    const __m128i xy_packed =
        _mm_adds_epi16(_mm_packs_epi32(x_scaled, y_scaled), vzero_point);
    const __m128i zw_packed =
        _mm_adds_epi16(_mm_packs_epi32(z_scaled, w_scaled), vzero_point);
    const __m128i xyzw_packed = _mm_packus_epi16(xy_packed, zw_packed);
    /* Final clamp to the caller-specified [qmin, qmax] range. */
    const __m128i xyzw_clamped =
        _mm_max_epu8(_mm_min_epu8(xyzw_packed, vqmax), vqmin);
    /*
     * 4x PABSD
     * 4x PSHUFD
     * 8x PMULUDQ
     * 4x PSRLQ
     * 4x PSRLD
     * 8x PADDQ
     * 4x PBLENDW
     * 4x PSIGND
     * 2x PACKSSDW
     * 1x PACKUSWB
     * 2x PADDW
     * 1x PMAXUB
     * 1x PMINUB
     * ---------------------
     * 47 instructions total
     */
    _mm_storeu_si128((__m128i*)output, xyzw_clamped);
    output += 16;
  }
}
| 4,984
| 35.925926
| 74
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/requantization/precise-ssse3.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <stdint.h>
#include <tmmintrin.h>
#include <fp16/bitcasts.h>
#include <qnnpack/requantization-stubs.h>
/*
 * Precise requantization of 32-bit accumulators to uint8, SSSE3 version:
 *   output = clamp(round(input * scale) + zero_point, qmin, qmax)
 * with rounding half away from zero. Processes 16 values per iteration.
 *
 * Identical structure to the SSE2 variant, but SSSE3 PABSD/PSIGND replace
 * the mask-based absolute value and sign restoration.
 *
 * @param n          number of elements; must be a multiple of 16.
 * @param input      pointer to n signed 32-bit accumulators.
 * @param scale      requantization scale in [2**-32, 1).
 * @param zero_point output zero point.
 * @param qmin,qmax  inclusive clamping bounds for the output.
 * @param output     pointer to n output bytes.
 */
void pytorch_qnnp_requantize_precise__ssse3(
    size_t n,
    const int32_t* input,
    float scale,
    uint8_t zero_point,
    uint8_t qmin,
    uint8_t qmax,
    uint8_t* output) {
  assert(n % 16 == 0);
  assert(scale < 1.0f);
  assert(scale >= 0x1.0p-32f);
  /* Split the FP32 scale into a 24-bit mantissa multiplier and a shift. */
  const uint32_t scale_bits = fp32_to_bits(scale);
  const uint32_t multiplier =
      (scale_bits & UINT32_C(0x007FFFFF)) | UINT32_C(0x00800000);
  const uint32_t shift = 127 + 23 - (scale_bits >> 23);
  assert(shift >= 24);
  assert(shift < 56);
  /* Pre-added rounding term 2**(shift-1): round-half-up after the shift. */
  const uint64_t rounding = UINT64_C(1) << (shift - 1);
  const __m128i vmultiplier = _mm_set1_epi32(multiplier);
  const __m128i vzero_point = _mm_set1_epi16((short)(uint16_t)zero_point);
  const __m128i vqmin = _mm_set1_epi8((char)qmin);
  const __m128i vqmax = _mm_set1_epi8((char)qmax);
  const __m128i vshift = _mm_cvtsi32_si128((int)shift);
  const __m128i vrounding = _mm_set1_epi64x(rounding);
  for (; n != 0; n -= 16) {
    const __m128i x = _mm_loadu_si128((const __m128i*)input);
    const __m128i y = _mm_loadu_si128((const __m128i*)(input + 4));
    const __m128i z = _mm_loadu_si128((const __m128i*)(input + 8));
    const __m128i w = _mm_loadu_si128((const __m128i*)(input + 12));
    input += 16;
    /* SSSE3 PABSD: per-lane absolute value. */
    const __m128i x_abs0123 = _mm_abs_epi32(x);
    const __m128i y_abs0123 = _mm_abs_epi32(y);
    const __m128i z_abs0123 = _mm_abs_epi32(z);
    const __m128i w_abs0123 = _mm_abs_epi32(w);
    /* Swap adjacent lanes so PMULUDQ can also see the odd elements. */
    const __m128i x_abs1032 =
        _mm_shuffle_epi32(x_abs0123, _MM_SHUFFLE(2, 3, 0, 1));
    const __m128i y_abs1032 =
        _mm_shuffle_epi32(y_abs0123, _MM_SHUFFLE(2, 3, 0, 1));
    const __m128i z_abs1032 =
        _mm_shuffle_epi32(z_abs0123, _MM_SHUFFLE(2, 3, 0, 1));
    const __m128i w_abs1032 =
        _mm_shuffle_epi32(w_abs0123, _MM_SHUFFLE(2, 3, 0, 1));
    /* PMULUDQ: unsigned 32x32->64 products of even-indexed lanes. */
    const __m128i x_absmul02 = _mm_mul_epu32(x_abs0123, vmultiplier);
    const __m128i y_absmul02 = _mm_mul_epu32(y_abs0123, vmultiplier);
    const __m128i z_absmul02 = _mm_mul_epu32(z_abs0123, vmultiplier);
    const __m128i w_absmul02 = _mm_mul_epu32(w_abs0123, vmultiplier);
    const __m128i x_absmul13 = _mm_mul_epu32(x_abs1032, vmultiplier);
    const __m128i y_absmul13 = _mm_mul_epu32(y_abs1032, vmultiplier);
    const __m128i z_absmul13 = _mm_mul_epu32(z_abs1032, vmultiplier);
    const __m128i w_absmul13 = _mm_mul_epu32(w_abs1032, vmultiplier);
    /* Add the rounding term, then logical 64-bit shift right. */
    const __m128i x_abs_scaled02 =
        _mm_srl_epi64(_mm_add_epi64(x_absmul02, vrounding), vshift);
    const __m128i x_abs_scaled13 =
        _mm_srl_epi64(_mm_add_epi64(x_absmul13, vrounding), vshift);
    const __m128i y_abs_scaled02 =
        _mm_srl_epi64(_mm_add_epi64(y_absmul02, vrounding), vshift);
    const __m128i y_abs_scaled13 =
        _mm_srl_epi64(_mm_add_epi64(y_absmul13, vrounding), vshift);
    const __m128i z_abs_scaled02 =
        _mm_srl_epi64(_mm_add_epi64(z_absmul02, vrounding), vshift);
    const __m128i z_abs_scaled13 =
        _mm_srl_epi64(_mm_add_epi64(z_absmul13, vrounding), vshift);
    const __m128i w_abs_scaled02 =
        _mm_srl_epi64(_mm_add_epi64(w_absmul02, vrounding), vshift);
    const __m128i w_abs_scaled13 =
        _mm_srl_epi64(_mm_add_epi64(w_absmul13, vrounding), vshift);
    /* Gather the low 32 bits of the four 64-bit results back into one
     * vector (order 0,2,1,3), then shuffle to restore lane order 0,1,2,3. */
    const __m128i x_abs_scaled0213 = _mm_castps_si128(_mm_shuffle_ps(
        _mm_castsi128_ps(x_abs_scaled02),
        _mm_castsi128_ps(x_abs_scaled13),
        _MM_SHUFFLE(2, 0, 2, 0)));
    const __m128i y_abs_scaled0213 = _mm_castps_si128(_mm_shuffle_ps(
        _mm_castsi128_ps(y_abs_scaled02),
        _mm_castsi128_ps(y_abs_scaled13),
        _MM_SHUFFLE(2, 0, 2, 0)));
    const __m128i z_abs_scaled0213 = _mm_castps_si128(_mm_shuffle_ps(
        _mm_castsi128_ps(z_abs_scaled02),
        _mm_castsi128_ps(z_abs_scaled13),
        _MM_SHUFFLE(2, 0, 2, 0)));
    const __m128i w_abs_scaled0213 = _mm_castps_si128(_mm_shuffle_ps(
        _mm_castsi128_ps(w_abs_scaled02),
        _mm_castsi128_ps(w_abs_scaled13),
        _MM_SHUFFLE(2, 0, 2, 0)));
    const __m128i x_abs_scaled =
        _mm_shuffle_epi32(x_abs_scaled0213, _MM_SHUFFLE(3, 1, 2, 0));
    const __m128i y_abs_scaled =
        _mm_shuffle_epi32(y_abs_scaled0213, _MM_SHUFFLE(3, 1, 2, 0));
    const __m128i z_abs_scaled =
        _mm_shuffle_epi32(z_abs_scaled0213, _MM_SHUFFLE(3, 1, 2, 0));
    const __m128i w_abs_scaled =
        _mm_shuffle_epi32(w_abs_scaled0213, _MM_SHUFFLE(3, 1, 2, 0));
    /* SSSE3 PSIGND: restore the sign from the original input. */
    const __m128i x_scaled = _mm_sign_epi32(x_abs_scaled, x);
    const __m128i y_scaled = _mm_sign_epi32(y_abs_scaled, y);
    const __m128i z_scaled = _mm_sign_epi32(z_abs_scaled, z);
    const __m128i w_scaled = _mm_sign_epi32(w_abs_scaled, w);
    /* Pack to int16 with saturation, add zero point, pack to uint8. */
    const __m128i xy_packed =
        _mm_adds_epi16(_mm_packs_epi32(x_scaled, y_scaled), vzero_point);
    const __m128i zw_packed =
        _mm_adds_epi16(_mm_packs_epi32(z_scaled, w_scaled), vzero_point);
    const __m128i xyzw_packed = _mm_packus_epi16(xy_packed, zw_packed);
    /* Final clamp to the caller-specified [qmin, qmax] range. */
    const __m128i xyzw_clamped =
        _mm_max_epu8(_mm_min_epu8(xyzw_packed, vqmax), vqmin);
    /*
     * 4x PABSD
     * 8x PSHUFD
     * 8x PMULUDQ
     * 8x PSRLQ
     * 8x PADDQ
     * 4x SHUFPS
     * 4x PSIGND
     * 2x PACKSSDW
     * 1x PACKUSWB
     * 2x PADDW
     * 1x PMAXUB
     * 1x PMINUB
     * ---------------------
     * 51 instructions total
     */
    _mm_storeu_si128((__m128i*)output, xyzw_clamped);
    output += 16;
  }
}
| 5,696
| 36.728477
| 74
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/requantization/q31-neon.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <stdint.h>
#include <arm_neon.h>
#include <fp16/bitcasts.h>
#include <qnnpack/requantization-stubs.h>
/*
 * Q31 requantization of 32-bit accumulators to uint8, NEON version:
 *   output = clamp((input * multiplier_q31 >> shift) + zero_point, qmin, qmax)
 * where multiplier/shift approximate `scale`. Processes 16 values per
 * iteration.
 *
 * @param n          number of elements; must be a multiple of 16.
 * @param input      pointer to n signed 32-bit accumulators.
 * @param scale      requantization scale in [2**-32, 1).
 * @param zero_point output zero point.
 * @param qmin,qmax  inclusive clamping bounds for the output.
 * @param output     pointer to n output bytes.
 */
void pytorch_qnnp_requantize_q31__neon(
    size_t n,
    const int32_t* input,
    float scale,
    uint8_t zero_point,
    uint8_t qmin,
    uint8_t qmax,
    uint8_t* output) {
  assert(n % 16 == 0);
  assert(scale < 1.0f);
  assert(scale >= 0x1.0p-32f);
  /* Compute requantization parameters */
  const uint32_t scale_bits = fp32_to_bits(scale);
  /* Multiplier is in [0x40000000, 0x7FFFFF80] range */
  const int32_t multiplier = (int32_t)(
      ((scale_bits & UINT32_C(0x007FFFFF)) | UINT32_C(0x00800000)) << 7);
  assert(multiplier >= INT32_C(0x40000000));
  assert(multiplier <= INT32_C(0x7FFFFF80));
  /* Shift is in [0, 31] range */
  const int32_t shift = 127 + 31 - 32 - (fp32_to_bits(scale) >> 23);
  assert(shift >= 0);
  assert(shift < 32);
  const int32x4_t vmultiplier = vdupq_n_s32(multiplier);
  const int16x8_t vzero_point = vdupq_n_s16((int16_t)(uint16_t)zero_point);
  /* VRSHL/SRSHL with a negative count performs a right shift with rounding. */
  const int32x4_t vshift = vdupq_n_s32(-shift);
  /* All-ones where shift == 0; used below to suppress the rounding
   * adjustment, which must only be applied for non-zero shifts. */
  const int32x4_t vshift_eq_0_mask =
      vreinterpretq_s32_u32(vceqq_s32(vshift, vmovq_n_s32(0)));
  const uint8x16_t vqmin = vdupq_n_u8(qmin);
  const uint8x16_t vqmax = vdupq_n_u8(qmax);
  for (; n != 0; n -= 16) {
    const int32x4_t x = vld1q_s32(input);
    const int32x4_t y = vld1q_s32(input + 4);
    const int32x4_t z = vld1q_s32(input + 8);
    const int32x4_t w = vld1q_s32(input + 12);
    input += 16;
    /*
     * Directly use VQRDMULH/SQRDMULH instruction for Q31 multiplication with
     * rounding. Although these instruction saturate out-of-range outputs, we
     * never hit this case in requantization.
     */
    const int32x4_t x_product = vqrdmulhq_s32(x, vmultiplier);
    const int32x4_t y_product = vqrdmulhq_s32(y, vmultiplier);
    const int32x4_t z_product = vqrdmulhq_s32(z, vmultiplier);
    const int32x4_t w_product = vqrdmulhq_s32(w, vmultiplier);
    /*
     * Shift the 32-bit product right with rounding.
     * Rounding is performed towards closest integer, with midpoints rounded up
     * (same as away from zero).
     *
     * We leverage the "right shift with rounding" instruction (VRSHL.S32 on ARM
     * NEON, SRSHL in ARM64 Advanced SIMD) to do the shift. However, as this
     * instruction rounds midpoints up, rather than away from zero, we adjust
     * the input by subtracting 1 from negative values, but only if shift is
     * non-zero.
     */
    const int32x4_t x_adjusted_product =
        vsraq_n_s32(x_product, vbicq_s32(x, vshift_eq_0_mask), 31);
    const int32x4_t y_adjusted_product =
        vsraq_n_s32(y_product, vbicq_s32(y, vshift_eq_0_mask), 31);
    const int32x4_t z_adjusted_product =
        vsraq_n_s32(z_product, vbicq_s32(z, vshift_eq_0_mask), 31);
    const int32x4_t w_adjusted_product =
        vsraq_n_s32(w_product, vbicq_s32(w, vshift_eq_0_mask), 31);
    const int32x4_t x_scaled = vrshlq_s32(x_adjusted_product, vshift);
    const int32x4_t y_scaled = vrshlq_s32(y_adjusted_product, vshift);
    const int32x4_t z_scaled = vrshlq_s32(z_adjusted_product, vshift);
    const int32x4_t w_scaled = vrshlq_s32(w_adjusted_product, vshift);
    /* Narrow to int16 with saturation, add zero point with saturating add,
     * then narrow to uint8 with unsigned saturation. */
#ifdef __aarch64__
    const int16x8_t xy_packed = vqaddq_s16(
        vqmovn_high_s32(vqmovn_s32(x_scaled), y_scaled), vzero_point);
    const int16x8_t zw_packed = vqaddq_s16(
        vqmovn_high_s32(vqmovn_s32(z_scaled), w_scaled), vzero_point);
    const uint8x16_t xyzw_packed =
        vqmovun_high_s16(vqmovun_s16(xy_packed), zw_packed);
#else
    const int16x8_t xy_packed = vqaddq_s16(
        vcombine_s16(vqmovn_s32(x_scaled), vqmovn_s32(y_scaled)), vzero_point);
    const int16x8_t zw_packed = vqaddq_s16(
        vcombine_s16(vqmovn_s32(z_scaled), vqmovn_s32(w_scaled)), vzero_point);
    const uint8x16_t xyzw_packed =
        vcombine_u8(vqmovun_s16(xy_packed), vqmovun_s16(zw_packed));
#endif
    /* Final clamp to the caller-specified [qmin, qmax] range. */
    const uint8x16_t xyzw_clamped =
        vmaxq_u8(vminq_u8(xyzw_packed, vqmax), vqmin);
    /*
     * AArch32 version:
     * 4x VQRDMULH.S32 Qd, Qm, Qn
     * 4x VBIC Qd, Qm, Qn
     * 4x VSRA.S32 Qd, Qm, #31
     * 4x VRSHL.S32 Qd, Qm, Qn
     * 4x VQMOVN.S32 Dd, Qm
     * 2x VQADD.S16 Qd, Qm, Qn
     * 2x VQMOVUN.S16 Dd, Qm
     * 1x VMAX.U8 Qd, Qm, Qn
     * 1x VMIN.U8 Qd, Qm, Qn
     * ---------------------
     * 26 instructions total
     *
     * AArch64 version:
     * 4x SQRDMULH Vd.4S, Vn.4S, Vm.4S
     * 4x BIC Vd.16B, Vn.16B, Vm.16B
     * 4x SSRA Vd.4S, Vn.4S, #31
     * 4x SRSHL Vd.4S, Vn.4S, Vm.4S
     * 2x SQXTN Vd.4H, Vn.4S
     * 2x SQXTN2 Vd.8H, Vn.4S
     * 2x SQADD Vd.8H, Vn.8H, Vm.8H
     * 1x SQXTUN Vd.8B, Vn.8H
     * 1x SQXTUN2 Vd.16B, Vn.8H
     * 1x UMIN Vd.16B, Vn.16B, Vm.16B
     * 1x UMAX Vd.16B, Vn.16B, Vm.16B
     * ---------------------
     * 26 instructions total
     */
    vst1q_u8(output, xyzw_clamped);
    output += 16;
  }
}
| 5,157
| 34.572414
| 80
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/requantization/q31-scalar.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <stdint.h>
#include <fp16/bitcasts.h>
#include <qnnpack/requantization-stubs.h>
#include <qnnpack/scalar-utils.h>
/*
 * Q31 requantization of 32-bit accumulators to uint8, scalar reference:
 *   output = clamp(rshift_round(q31_mul(input, multiplier), shift)
 *                  + zero_point, qmin, qmax)
 * where the Q31 multiplier and shift are derived from `scale`.
 * Processes values in groups of 4.
 *
 * @param n          number of elements; must be a multiple of 4.
 * @param input      pointer to n signed 32-bit accumulators.
 * @param scale      requantization scale in [2**-32, 1).
 * @param zero_point output zero point.
 * @param qmin,qmax  inclusive clamping bounds for the output.
 * @param output     pointer to n output bytes.
 */
void pytorch_qnnp_requantize_q31__scalar(
    size_t n,
    const int32_t* input,
    float scale,
    uint8_t zero_point,
    uint8_t qmin,
    uint8_t qmax,
    uint8_t* output) {
  assert(n % 4 == 0);
  assert(scale < 1.0f);
  assert(scale >= 0x1.0p-32f);

  /* Compute requantization parameters */
  const uint32_t scale_bits = fp32_to_bits(scale);
  /* Multiplier is in [0x40000000, 0x7FFFFF80] range */
  const int32_t multiplier = (int32_t)(
      ((scale_bits & UINT32_C(0x007FFFFF)) | UINT32_C(0x00800000)) << 7);
  assert(multiplier >= INT32_C(0x40000000));
  assert(multiplier <= INT32_C(0x7FFFFF80));
  /* Shift is in [0, 31] range */
  const int32_t shift = 127 + 31 - 32 - (fp32_to_bits(scale) >> 23);
  assert(shift >= 0);
  assert(shift < 32);

  const int64_t q31rounding = INT64_C(0x40000000);
  /* Constants for the remainder-based rounding correction below. */
  const int32_t remainder_mask =
      (int32_t)((UINT32_C(1) << shift) - UINT32_C(1));
  const int32_t threshold = (int32_t)((uint32_t)remainder_mask >> 1);
  /* Clamping bounds expressed relative to the zero point. */
  const int32_t smin = (int32_t)(uint32_t)qmin - (int32_t)(uint32_t)zero_point;
  const int32_t smax = (int32_t)(uint32_t)qmax - (int32_t)(uint32_t)zero_point;

  for (; n != 0; n -= 4) {
    for (size_t i = 0; i < 4; i++) {
      const int32_t value = input[i];

      /*
       * Full 64-bit product of signed 32-bit factors.
       * Note: multiplier can be treated as either signed or unsigned.
       */
      const int64_t product = (int64_t)value * (int64_t)multiplier;

      /*
       * Q31 multiplication result: bits 31-62 of the product, rounded up.
       * Casts to unsigned types avoid undefined behavior on the shift.
       * Given the multiplier range, the result lies in
       * [-2147483520, 2147483519].
       */
      const int32_t q31product =
          (int32_t)(uint32_t)((uint64_t)(product + q31rounding) >> 31);

      /*
       * Arithmetic right shift with round-half-away-from-zero, done as
       * a truncating shift plus a remainder-based correction (pre-adding
       * a rounding constant could overflow int32). The result needs a +1
       * adjustment when:
       *   - input >= 0, shift != 0, remainder >= 2**(shift-1), or
       *   - input < 0, shift != 0, remainder > 2**(shift-1),
       * which both reduce to:
       *   remainder - (input < 0) > ((2**shift - 1) >> 1).
       * When shift == 0 the remainder is 0 and no adjustment happens.
       */
      const int32_t remainder =
          (q31product & remainder_mask) - (int32_t)(q31product < 0);
      const int32_t scaled =
          asr_s32(q31product, shift) + (int32_t)(remainder > threshold);

      /*
       * Clamp in the zero-point-relative domain: adding the zero point
       * first could overflow int32.
       */
      const int32_t clamped =
          scaled < smin ? smin : scaled > smax ? smax : scaled;

      /* Re-add the zero point; the result is guaranteed in [qmin, qmax]. */
      output[i] = (uint8_t)(clamped + zero_point);
    }
    input += 4;
    output += 4;
  }
}
| 6,949
| 41.378049
| 80
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/requantization/q31-sse2.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <stdint.h>
#include <emmintrin.h>
#include <fp16/bitcasts.h>
#include <qnnpack/requantization-stubs.h>
void pytorch_qnnp_requantize_q31__sse2(
size_t n,
const int32_t* input,
float scale,
uint8_t zero_point,
uint8_t qmin,
uint8_t qmax,
uint8_t* output) {
assert(n % 16 == 0);
assert(scale < 1.0f);
assert(scale >= 0x1.0p-32f);
/* Compute requantization parameters */
const uint32_t scale_bits = fp32_to_bits(scale);
/* Multiplier is in [0x40000000, 0x7FFFFF80] range */
const int32_t multiplier = (int32_t)(
((scale_bits & UINT32_C(0x007FFFFF)) | UINT32_C(0x00800000)) << 7);
assert(multiplier >= INT32_C(0x40000000));
assert(multiplier <= INT32_C(0x7FFFFF80));
/* Shift is in [0, 31] range */
const int32_t shift = 127 + 31 - 32 - (fp32_to_bits(scale) >> 23);
assert(shift >= 0);
assert(shift < 32);
const __m128i vmultiplier = _mm_set1_epi32(multiplier);
const __m128i vzero_point = _mm_set1_epi16((short)(uint16_t)zero_point);
const __m128i vqmin = _mm_set1_epi8((char)qmin);
const __m128i vqmax = _mm_set1_epi8((char)qmax);
const __m128i vshift = _mm_cvtsi32_si128((int)shift);
const uint32_t remainder_mask = (UINT32_C(1) << shift) - UINT32_C(1);
const __m128i vremainder_mask = _mm_set1_epi32((int)remainder_mask);
const __m128i vthreshold = _mm_set1_epi32((int)(remainder_mask >> 1));
const __m128i vq31rounding = _mm_set1_epi64x(UINT64_C(0x40000000));
for (; n != 0; n -= 16) {
const __m128i x = _mm_loadu_si128((const __m128i*)input);
const __m128i y = _mm_loadu_si128((const __m128i*)(input + 4));
const __m128i z = _mm_loadu_si128((const __m128i*)(input + 8));
const __m128i w = _mm_loadu_si128((const __m128i*)(input + 12));
input += 16;
const __m128i x_neg_mask = _mm_cmpgt_epi32(_mm_setzero_si128(), x);
const __m128i y_neg_mask = _mm_cmpgt_epi32(_mm_setzero_si128(), y);
const __m128i z_neg_mask = _mm_cmpgt_epi32(_mm_setzero_si128(), z);
const __m128i w_neg_mask = _mm_cmpgt_epi32(_mm_setzero_si128(), w);
const __m128i x_abs =
_mm_sub_epi32(_mm_xor_si128(x, x_neg_mask), x_neg_mask);
const __m128i y_abs =
_mm_sub_epi32(_mm_xor_si128(y, y_neg_mask), y_neg_mask);
const __m128i z_abs =
_mm_sub_epi32(_mm_xor_si128(z, z_neg_mask), z_neg_mask);
const __m128i w_abs =
_mm_sub_epi32(_mm_xor_si128(w, w_neg_mask), w_neg_mask);
const __m128i x_abs_rev = _mm_shuffle_epi32(x_abs, _MM_SHUFFLE(2, 3, 0, 1));
const __m128i y_abs_rev = _mm_shuffle_epi32(y_abs, _MM_SHUFFLE(2, 3, 0, 1));
const __m128i z_abs_rev = _mm_shuffle_epi32(z_abs, _MM_SHUFFLE(2, 3, 0, 1));
const __m128i w_abs_rev = _mm_shuffle_epi32(w_abs, _MM_SHUFFLE(2, 3, 0, 1));
const __m128i x_abs_product_even = _mm_mul_epu32(x_abs, vmultiplier);
const __m128i y_abs_product_even = _mm_mul_epu32(y_abs, vmultiplier);
const __m128i z_abs_product_even = _mm_mul_epu32(z_abs, vmultiplier);
const __m128i w_abs_product_even = _mm_mul_epu32(w_abs, vmultiplier);
const __m128i x_neg_mask_even =
_mm_shuffle_epi32(x_neg_mask, _MM_SHUFFLE(2, 2, 0, 0));
const __m128i y_neg_mask_even =
_mm_shuffle_epi32(y_neg_mask, _MM_SHUFFLE(2, 2, 0, 0));
const __m128i z_neg_mask_even =
_mm_shuffle_epi32(z_neg_mask, _MM_SHUFFLE(2, 2, 0, 0));
const __m128i w_neg_mask_even =
_mm_shuffle_epi32(w_neg_mask, _MM_SHUFFLE(2, 2, 0, 0));
const __m128i x_product_even = _mm_sub_epi64(
_mm_xor_si128(x_abs_product_even, x_neg_mask_even), x_neg_mask_even);
const __m128i y_product_even = _mm_sub_epi64(
_mm_xor_si128(y_abs_product_even, y_neg_mask_even), y_neg_mask_even);
const __m128i z_product_even = _mm_sub_epi64(
_mm_xor_si128(z_abs_product_even, z_neg_mask_even), z_neg_mask_even);
const __m128i w_product_even = _mm_sub_epi64(
_mm_xor_si128(w_abs_product_even, w_neg_mask_even), w_neg_mask_even);
const __m128i x_rounded_product_even =
_mm_add_epi64(x_product_even, vq31rounding);
const __m128i y_rounded_product_even =
_mm_add_epi64(y_product_even, vq31rounding);
const __m128i z_rounded_product_even =
_mm_add_epi64(z_product_even, vq31rounding);
const __m128i w_rounded_product_even =
_mm_add_epi64(w_product_even, vq31rounding);
const __m128i x_abs_product_odd = _mm_mul_epu32(x_abs_rev, vmultiplier);
const __m128i y_abs_product_odd = _mm_mul_epu32(y_abs_rev, vmultiplier);
const __m128i z_abs_product_odd = _mm_mul_epu32(z_abs_rev, vmultiplier);
const __m128i w_abs_product_odd = _mm_mul_epu32(w_abs_rev, vmultiplier);
const __m128i x_neg_mask_odd =
_mm_shuffle_epi32(x_neg_mask, _MM_SHUFFLE(3, 3, 1, 1));
const __m128i y_neg_mask_odd =
_mm_shuffle_epi32(y_neg_mask, _MM_SHUFFLE(3, 3, 1, 1));
const __m128i z_neg_mask_odd =
_mm_shuffle_epi32(z_neg_mask, _MM_SHUFFLE(3, 3, 1, 1));
const __m128i w_neg_mask_odd =
_mm_shuffle_epi32(w_neg_mask, _MM_SHUFFLE(3, 3, 1, 1));
const __m128i x_product_odd = _mm_sub_epi64(
_mm_xor_si128(x_abs_product_odd, x_neg_mask_odd), x_neg_mask_odd);
const __m128i y_product_odd = _mm_sub_epi64(
_mm_xor_si128(y_abs_product_odd, y_neg_mask_odd), y_neg_mask_odd);
const __m128i z_product_odd = _mm_sub_epi64(
_mm_xor_si128(z_abs_product_odd, z_neg_mask_odd), z_neg_mask_odd);
const __m128i w_product_odd = _mm_sub_epi64(
_mm_xor_si128(w_abs_product_odd, w_neg_mask_odd), w_neg_mask_odd);
const __m128i x_rounded_product_odd =
_mm_add_epi64(x_product_odd, vq31rounding);
const __m128i y_rounded_product_odd =
_mm_add_epi64(y_product_odd, vq31rounding);
const __m128i z_rounded_product_odd =
_mm_add_epi64(z_product_odd, vq31rounding);
const __m128i w_rounded_product_odd =
_mm_add_epi64(w_product_odd, vq31rounding);
const __m128i x_q31product_even =
_mm_srli_epi64(x_rounded_product_even, 31);
const __m128i x_q31product_odd = _mm_srli_epi64(x_rounded_product_odd, 31);
const __m128i y_q31product_even =
_mm_srli_epi64(y_rounded_product_even, 31);
const __m128i y_q31product_odd = _mm_srli_epi64(y_rounded_product_odd, 31);
const __m128i z_q31product_even =
_mm_srli_epi64(z_rounded_product_even, 31);
const __m128i z_q31product_odd = _mm_srli_epi64(z_rounded_product_odd, 31);
const __m128i w_q31product_even =
_mm_srli_epi64(w_rounded_product_even, 31);
const __m128i w_q31product_odd = _mm_srli_epi64(w_rounded_product_odd, 31);
const __m128i x_q31product_0213 = _mm_castps_si128(_mm_shuffle_ps(
_mm_castsi128_ps(x_q31product_even),
_mm_castsi128_ps(x_q31product_odd),
_MM_SHUFFLE(2, 0, 2, 0)));
const __m128i y_q31product_0213 = _mm_castps_si128(_mm_shuffle_ps(
_mm_castsi128_ps(y_q31product_even),
_mm_castsi128_ps(y_q31product_odd),
_MM_SHUFFLE(2, 0, 2, 0)));
const __m128i z_q31product_0213 = _mm_castps_si128(_mm_shuffle_ps(
_mm_castsi128_ps(z_q31product_even),
_mm_castsi128_ps(z_q31product_odd),
_MM_SHUFFLE(2, 0, 2, 0)));
const __m128i w_q31product_0213 = _mm_castps_si128(_mm_shuffle_ps(
_mm_castsi128_ps(w_q31product_even),
_mm_castsi128_ps(w_q31product_odd),
_MM_SHUFFLE(2, 0, 2, 0)));
const __m128i x_q31product =
_mm_shuffle_epi32(x_q31product_0213, _MM_SHUFFLE(3, 1, 2, 0));
const __m128i y_q31product =
_mm_shuffle_epi32(y_q31product_0213, _MM_SHUFFLE(3, 1, 2, 0));
const __m128i z_q31product =
_mm_shuffle_epi32(z_q31product_0213, _MM_SHUFFLE(3, 1, 2, 0));
const __m128i w_q31product =
_mm_shuffle_epi32(w_q31product_0213, _MM_SHUFFLE(3, 1, 2, 0));
const __m128i x_remainder = _mm_add_epi32(
_mm_and_si128(x_q31product, vremainder_mask),
_mm_cmpgt_epi32(_mm_setzero_si128(), x_q31product));
const __m128i y_remainder = _mm_add_epi32(
_mm_and_si128(y_q31product, vremainder_mask),
_mm_cmpgt_epi32(_mm_setzero_si128(), y_q31product));
const __m128i z_remainder = _mm_add_epi32(
_mm_and_si128(z_q31product, vremainder_mask),
_mm_cmpgt_epi32(_mm_setzero_si128(), z_q31product));
const __m128i w_remainder = _mm_add_epi32(
_mm_and_si128(w_q31product, vremainder_mask),
_mm_cmpgt_epi32(_mm_setzero_si128(), w_q31product));
const __m128i x_scaled = _mm_sub_epi32(
_mm_sra_epi32(x_q31product, vshift),
_mm_cmpgt_epi32(x_remainder, vthreshold));
const __m128i y_scaled = _mm_sub_epi32(
_mm_sra_epi32(y_q31product, vshift),
_mm_cmpgt_epi32(y_remainder, vthreshold));
const __m128i z_scaled = _mm_sub_epi32(
_mm_sra_epi32(z_q31product, vshift),
_mm_cmpgt_epi32(z_remainder, vthreshold));
const __m128i w_scaled = _mm_sub_epi32(
_mm_sra_epi32(w_q31product, vshift),
_mm_cmpgt_epi32(w_remainder, vthreshold));
const __m128i xy_packed =
_mm_adds_epi16(_mm_packs_epi32(x_scaled, y_scaled), vzero_point);
const __m128i zw_packed =
_mm_adds_epi16(_mm_packs_epi32(z_scaled, w_scaled), vzero_point);
const __m128i xyzw_packed = _mm_packus_epi16(xy_packed, zw_packed);
const __m128i xyzw_clamped =
_mm_max_epu8(_mm_min_epu8(xyzw_packed, vqmax), vqmin);
/*
* 16x PSHUFD
* 4x SHUFPS
* 8x PMULUDQ
* 8x PXOR (setzero)
* 12x PXOR
* 4x PAND
* 8x PADDQ
* 4x PADDD
* 2x PADDW
* 8x PSUBQ
* 8x PSUBD
* 8x PSRLQ (immediate)
* 4x PSRAD (register)
* 12x PCMPGTD
* 2x PACKSSDW
* 1x PACKUSWB
* 1x PMAXUB
* 1x PMINUB
* ---------------------
* 111 instructions total
*/
_mm_storeu_si128((__m128i*)output, xyzw_clamped);
output += 16;
}
}
| 10,115
| 40.801653
| 80
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/requantization/q31-sse4.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <stdint.h>
#include <smmintrin.h>
#include <fp16/bitcasts.h>
#include <qnnpack/requantization-stubs.h>
/*
 * Q31 requantization, SSE4.1 variant.
 *
 * Converts n (a multiple of 16) int32 values to uint8 as
 *   output = clamp(round(input * scale) + zero_point, [qmin, qmax])
 * using integer arithmetic only: the float scale (required to be in
 * [2**-32, 1)) is decomposed into a Q31 fixed-point multiplier in
 * [0x40000000, 0x7FFFFF80] and an arithmetic right shift in [0, 31].
 */
void pytorch_qnnp_requantize_q31__sse4(
    size_t n,
    const int32_t* input,
    float scale,
    uint8_t zero_point,
    uint8_t qmin,
    uint8_t qmax,
    uint8_t* output) {
  assert(n % 16 == 0); /* 16 values are processed per loop iteration */
  assert(scale < 1.0f);
  assert(scale >= 0x1.0p-32f);
  /* Compute requantization parameters */
  const uint32_t scale_bits = fp32_to_bits(scale);
  /* Multiplier is in [0x40000000, 0x7FFFFF80] range: the 24-bit significand
   * (with the implicit leading 1 restored) shifted up into Q31 position. */
  const int32_t multiplier = (int32_t)(
      ((scale_bits & UINT32_C(0x007FFFFF)) | UINT32_C(0x00800000)) << 7);
  assert(multiplier >= INT32_C(0x40000000));
  assert(multiplier <= INT32_C(0x7FFFFF80));
  /* Shift is in [0, 31] range, recovered from the float exponent. */
  const int32_t shift = 127 + 31 - 32 - (fp32_to_bits(scale) >> 23);
  assert(shift >= 0);
  assert(shift < 32);
  const __m128i vmultiplier = _mm_set1_epi32(multiplier);
  const __m128i vzero_point = _mm_set1_epi16((short)(uint16_t)zero_point);
  const __m128i vqmin = _mm_set1_epi8((char)qmin);
  const __m128i vqmax = _mm_set1_epi8((char)qmax);
  const __m128i vshift = _mm_cvtsi32_si128((int)shift);
  /* remainder/threshold turn the arithmetic shift (which rounds toward
   * negative infinity) into round-to-nearest, ties away from zero. */
  const uint32_t remainder_mask = (UINT32_C(1) << shift) - UINT32_C(1);
  const __m128i vremainder_mask = _mm_set1_epi32((int)remainder_mask);
  const __m128i vthreshold = _mm_set1_epi32((int)(remainder_mask >> 1));
  /* Rounding constant for the Q31 multiply: 2**30, added before >> 31. */
  const __m128i vq31rounding = _mm_set1_epi64x(UINT64_C(0x40000000));
  for (; n != 0; n -= 16) {
    const __m128i x = _mm_loadu_si128((const __m128i*)input);
    const __m128i y = _mm_loadu_si128((const __m128i*)(input + 4));
    const __m128i z = _mm_loadu_si128((const __m128i*)(input + 8));
    const __m128i w = _mm_loadu_si128((const __m128i*)(input + 12));
    input += 16;
    /* PMULDQ multiplies only dwords 0 and 2; swapping adjacent dwords
     * exposes the odd elements to a second multiply. */
    const __m128i x_rev = _mm_shuffle_epi32(x, _MM_SHUFFLE(2, 3, 0, 1));
    const __m128i y_rev = _mm_shuffle_epi32(y, _MM_SHUFFLE(2, 3, 0, 1));
    const __m128i z_rev = _mm_shuffle_epi32(z, _MM_SHUFFLE(2, 3, 0, 1));
    const __m128i w_rev = _mm_shuffle_epi32(w, _MM_SHUFFLE(2, 3, 0, 1));
    /* 64-bit signed products of the even elements, pre-rounded. */
    const __m128i x_product_even =
        _mm_add_epi64(_mm_mul_epi32(x, vmultiplier), vq31rounding);
    const __m128i y_product_even =
        _mm_add_epi64(_mm_mul_epi32(y, vmultiplier), vq31rounding);
    const __m128i z_product_even =
        _mm_add_epi64(_mm_mul_epi32(z, vmultiplier), vq31rounding);
    const __m128i w_product_even =
        _mm_add_epi64(_mm_mul_epi32(w, vmultiplier), vq31rounding);
    const __m128i x_product_odd =
        _mm_add_epi64(_mm_mul_epi32(x_rev, vmultiplier), vq31rounding);
    const __m128i y_product_odd =
        _mm_add_epi64(_mm_mul_epi32(y_rev, vmultiplier), vq31rounding);
    const __m128i z_product_odd =
        _mm_add_epi64(_mm_mul_epi32(z_rev, vmultiplier), vq31rounding);
    const __m128i w_product_odd =
        _mm_add_epi64(_mm_mul_epi32(w_rev, vmultiplier), vq31rounding);
    /* Even lanes: >> 31 leaves the Q31 result in the LOW dword of each
     * 64-bit lane. Odd lanes: doubling (i.e. << 1, safe because
     * |product + rounding| < 2**63) moves the Q31 result into the HIGH
     * dword, so a single PBLENDW can interleave even/odd results. */
    const __m128i x_q31product_even = _mm_srli_epi64(x_product_even, 31);
    const __m128i x_q31product_odd =
        _mm_add_epi64(x_product_odd, x_product_odd);
    const __m128i y_q31product_even = _mm_srli_epi64(y_product_even, 31);
    const __m128i y_q31product_odd =
        _mm_add_epi64(y_product_odd, y_product_odd);
    const __m128i z_q31product_even = _mm_srli_epi64(z_product_even, 31);
    const __m128i z_q31product_odd =
        _mm_add_epi64(z_product_odd, z_product_odd);
    const __m128i w_q31product_even = _mm_srli_epi64(w_product_even, 31);
    const __m128i w_q31product_odd =
        _mm_add_epi64(w_product_odd, w_product_odd);
    /* 0xCC selects 16-bit words 2,3,6,7 (dwords 1 and 3) from the odd
     * result, restoring the original element order. */
    const __m128i x_q31product =
        _mm_blend_epi16(x_q31product_even, x_q31product_odd, 0xCC);
    const __m128i y_q31product =
        _mm_blend_epi16(y_q31product_even, y_q31product_odd, 0xCC);
    const __m128i z_q31product =
        _mm_blend_epi16(z_q31product_even, z_q31product_odd, 0xCC);
    const __m128i w_q31product =
        _mm_blend_epi16(w_q31product_even, w_q31product_odd, 0xCC);
    /* Remainder of the upcoming arithmetic shift; for negative values the
     * cmpgt mask (-1) biases it so ties round away from zero. */
    const __m128i x_remainder = _mm_add_epi32(
        _mm_and_si128(x_q31product, vremainder_mask),
        _mm_cmpgt_epi32(_mm_setzero_si128(), x_q31product));
    const __m128i y_remainder = _mm_add_epi32(
        _mm_and_si128(y_q31product, vremainder_mask),
        _mm_cmpgt_epi32(_mm_setzero_si128(), y_q31product));
    const __m128i z_remainder = _mm_add_epi32(
        _mm_and_si128(z_q31product, vremainder_mask),
        _mm_cmpgt_epi32(_mm_setzero_si128(), z_q31product));
    const __m128i w_remainder = _mm_add_epi32(
        _mm_and_si128(w_q31product, vremainder_mask),
        _mm_cmpgt_epi32(_mm_setzero_si128(), w_q31product));
    /* Shift, then add 1 (subtract the all-ones mask) where the remainder
     * exceeds the rounding threshold. */
    const __m128i x_scaled = _mm_sub_epi32(
        _mm_sra_epi32(x_q31product, vshift),
        _mm_cmpgt_epi32(x_remainder, vthreshold));
    const __m128i y_scaled = _mm_sub_epi32(
        _mm_sra_epi32(y_q31product, vshift),
        _mm_cmpgt_epi32(y_remainder, vthreshold));
    const __m128i z_scaled = _mm_sub_epi32(
        _mm_sra_epi32(z_q31product, vshift),
        _mm_cmpgt_epi32(z_remainder, vthreshold));
    const __m128i w_scaled = _mm_sub_epi32(
        _mm_sra_epi32(w_q31product, vshift),
        _mm_cmpgt_epi32(w_remainder, vthreshold));
    /* Pack to int16 with saturation, add the output zero point, pack to
     * uint8 with saturation, and clamp to [qmin, qmax]. */
    const __m128i xy_packed =
        _mm_adds_epi16(_mm_packs_epi32(x_scaled, y_scaled), vzero_point);
    const __m128i zw_packed =
        _mm_adds_epi16(_mm_packs_epi32(z_scaled, w_scaled), vzero_point);
    const __m128i xyzw_packed = _mm_packus_epi16(xy_packed, zw_packed);
    const __m128i xyzw_clamped =
        _mm_max_epu8(_mm_min_epu8(xyzw_packed, vqmax), vqmin);
    /*
     * Instruction budget per iteration (16 values):
     * 4x PSHUFD
     * 8x PMULDQ
     * 12x PADDQ
     * 4x PADDD
     * 2x PADDW
     * 4x PSUBD
     * 4x PSRLQ (immediate)
     * 4x PSRAD (register)
     * 4x PBLENDW
     * 4x PAND
     * 4x PXOR (setzero)
     * 8x PCMPGTD
     * 2x PACKSSDW
     * 1x PACKUSWB
     * 1x PMAXUB
     * 1x PMINUB
     * ---------------------
     * 67 instructions total
     */
    _mm_storeu_si128((__m128i*)output, xyzw_clamped);
    output += 16;
  }
}
| 6,240
| 37.288344
| 74
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/requantization/q31-ssse3.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <stdint.h>
#include <tmmintrin.h>
#include <fp16/bitcasts.h>
#include <qnnpack/requantization-stubs.h>
/*
 * Q31 requantization, SSSE3 variant.
 *
 * Same contract as the SSE4.1 version:
 *   output = clamp(round(input * scale) + zero_point, [qmin, qmax])
 * for n (a multiple of 16) int32 inputs, with scale in [2**-32, 1)
 * decomposed into a Q31 multiplier and a right shift.
 *
 * SSSE3 has no signed 32x32->64 multiply (PMULDQ), so this variant takes
 * absolute values (PABSD, the one SSSE3-specific instruction used),
 * multiplies unsigned (PMULUDQ), and restores the sign with xor/sub
 * against the per-lane sign masks.
 */
void pytorch_qnnp_requantize_q31__ssse3(
    size_t n,
    const int32_t* input,
    float scale,
    uint8_t zero_point,
    uint8_t qmin,
    uint8_t qmax,
    uint8_t* output) {
  assert(n % 16 == 0); /* 16 values are processed per loop iteration */
  assert(scale < 1.0f);
  assert(scale >= 0x1.0p-32f);
  /* Compute requantization parameters */
  const uint32_t scale_bits = fp32_to_bits(scale);
  /* Multiplier is in [0x40000000, 0x7FFFFF80] range: the 24-bit significand
   * (with the implicit leading 1 restored) shifted up into Q31 position. */
  const int32_t multiplier = (int32_t)(
      ((scale_bits & UINT32_C(0x007FFFFF)) | UINT32_C(0x00800000)) << 7);
  assert(multiplier >= INT32_C(0x40000000));
  assert(multiplier <= INT32_C(0x7FFFFF80));
  /* Shift is in [0, 31] range, recovered from the float exponent. */
  const int32_t shift = 127 + 31 - 32 - (fp32_to_bits(scale) >> 23);
  assert(shift >= 0);
  assert(shift < 32);
  const __m128i vmultiplier = _mm_set1_epi32(multiplier);
  const __m128i vzero_point = _mm_set1_epi16((short)(uint16_t)zero_point);
  const __m128i vqmin = _mm_set1_epi8((char)qmin);
  const __m128i vqmax = _mm_set1_epi8((char)qmax);
  const __m128i vshift = _mm_cvtsi32_si128((int)shift);
  /* remainder/threshold turn the arithmetic shift (which rounds toward
   * negative infinity) into round-to-nearest, ties away from zero. */
  const uint32_t remainder_mask = (UINT32_C(1) << shift) - UINT32_C(1);
  const __m128i vremainder_mask = _mm_set1_epi32((int)remainder_mask);
  const __m128i vthreshold = _mm_set1_epi32((int)(remainder_mask >> 1));
  /* Rounding constant for the Q31 multiply: 2**30, added before >> 31. */
  const __m128i vq31rounding = _mm_set1_epi64x(UINT64_C(0x40000000));
  for (; n != 0; n -= 16) {
    const __m128i x = _mm_loadu_si128((const __m128i*)input);
    const __m128i y = _mm_loadu_si128((const __m128i*)(input + 4));
    const __m128i z = _mm_loadu_si128((const __m128i*)(input + 8));
    const __m128i w = _mm_loadu_si128((const __m128i*)(input + 12));
    input += 16;
    /* |input| via PABSD; sign masks (all-ones where negative) kept to
     * restore the sign after the unsigned multiply. */
    const __m128i x_abs = _mm_abs_epi32(x);
    const __m128i y_abs = _mm_abs_epi32(y);
    const __m128i z_abs = _mm_abs_epi32(z);
    const __m128i w_abs = _mm_abs_epi32(w);
    const __m128i x_neg_mask = _mm_cmpgt_epi32(_mm_setzero_si128(), x);
    const __m128i y_neg_mask = _mm_cmpgt_epi32(_mm_setzero_si128(), y);
    const __m128i z_neg_mask = _mm_cmpgt_epi32(_mm_setzero_si128(), z);
    const __m128i w_neg_mask = _mm_cmpgt_epi32(_mm_setzero_si128(), w);
    /* PMULUDQ multiplies only dwords 0 and 2; swapping adjacent dwords
     * exposes the odd elements to a second multiply. */
    const __m128i x_abs_rev = _mm_shuffle_epi32(x_abs, _MM_SHUFFLE(2, 3, 0, 1));
    const __m128i y_abs_rev = _mm_shuffle_epi32(y_abs, _MM_SHUFFLE(2, 3, 0, 1));
    const __m128i z_abs_rev = _mm_shuffle_epi32(z_abs, _MM_SHUFFLE(2, 3, 0, 1));
    const __m128i w_abs_rev = _mm_shuffle_epi32(w_abs, _MM_SHUFFLE(2, 3, 0, 1));
    const __m128i x_abs_product_even = _mm_mul_epu32(x_abs, vmultiplier);
    const __m128i y_abs_product_even = _mm_mul_epu32(y_abs, vmultiplier);
    const __m128i z_abs_product_even = _mm_mul_epu32(z_abs, vmultiplier);
    const __m128i w_abs_product_even = _mm_mul_epu32(w_abs, vmultiplier);
    /* Broadcast each 32-bit sign mask across its 64-bit product lane. */
    const __m128i x_neg_mask_even =
        _mm_shuffle_epi32(x_neg_mask, _MM_SHUFFLE(2, 2, 0, 0));
    const __m128i y_neg_mask_even =
        _mm_shuffle_epi32(y_neg_mask, _MM_SHUFFLE(2, 2, 0, 0));
    const __m128i z_neg_mask_even =
        _mm_shuffle_epi32(z_neg_mask, _MM_SHUFFLE(2, 2, 0, 0));
    const __m128i w_neg_mask_even =
        _mm_shuffle_epi32(w_neg_mask, _MM_SHUFFLE(2, 2, 0, 0));
    /* Restore the sign: (p ^ m) - m negates p where m is all-ones. */
    const __m128i x_product_even = _mm_sub_epi64(
        _mm_xor_si128(x_abs_product_even, x_neg_mask_even), x_neg_mask_even);
    const __m128i y_product_even = _mm_sub_epi64(
        _mm_xor_si128(y_abs_product_even, y_neg_mask_even), y_neg_mask_even);
    const __m128i z_product_even = _mm_sub_epi64(
        _mm_xor_si128(z_abs_product_even, z_neg_mask_even), z_neg_mask_even);
    const __m128i w_product_even = _mm_sub_epi64(
        _mm_xor_si128(w_abs_product_even, w_neg_mask_even), w_neg_mask_even);
    const __m128i x_rounded_product_even =
        _mm_add_epi64(x_product_even, vq31rounding);
    const __m128i y_rounded_product_even =
        _mm_add_epi64(y_product_even, vq31rounding);
    const __m128i z_rounded_product_even =
        _mm_add_epi64(z_product_even, vq31rounding);
    const __m128i w_rounded_product_even =
        _mm_add_epi64(w_product_even, vq31rounding);
    const __m128i x_abs_product_odd = _mm_mul_epu32(x_abs_rev, vmultiplier);
    const __m128i y_abs_product_odd = _mm_mul_epu32(y_abs_rev, vmultiplier);
    const __m128i z_abs_product_odd = _mm_mul_epu32(z_abs_rev, vmultiplier);
    const __m128i w_abs_product_odd = _mm_mul_epu32(w_abs_rev, vmultiplier);
    const __m128i x_neg_mask_odd =
        _mm_shuffle_epi32(x_neg_mask, _MM_SHUFFLE(3, 3, 1, 1));
    const __m128i y_neg_mask_odd =
        _mm_shuffle_epi32(y_neg_mask, _MM_SHUFFLE(3, 3, 1, 1));
    const __m128i z_neg_mask_odd =
        _mm_shuffle_epi32(z_neg_mask, _MM_SHUFFLE(3, 3, 1, 1));
    const __m128i w_neg_mask_odd =
        _mm_shuffle_epi32(w_neg_mask, _MM_SHUFFLE(3, 3, 1, 1));
    const __m128i x_product_odd = _mm_sub_epi64(
        _mm_xor_si128(x_abs_product_odd, x_neg_mask_odd), x_neg_mask_odd);
    const __m128i y_product_odd = _mm_sub_epi64(
        _mm_xor_si128(y_abs_product_odd, y_neg_mask_odd), y_neg_mask_odd);
    const __m128i z_product_odd = _mm_sub_epi64(
        _mm_xor_si128(z_abs_product_odd, z_neg_mask_odd), z_neg_mask_odd);
    const __m128i w_product_odd = _mm_sub_epi64(
        _mm_xor_si128(w_abs_product_odd, w_neg_mask_odd), w_neg_mask_odd);
    const __m128i x_rounded_product_odd =
        _mm_add_epi64(x_product_odd, vq31rounding);
    const __m128i y_rounded_product_odd =
        _mm_add_epi64(y_product_odd, vq31rounding);
    const __m128i z_rounded_product_odd =
        _mm_add_epi64(z_product_odd, vq31rounding);
    const __m128i w_rounded_product_odd =
        _mm_add_epi64(w_product_odd, vq31rounding);
    /* Q31 result = (product + 2**30) >> 31, per 64-bit lane. */
    const __m128i x_q31product_even =
        _mm_srli_epi64(x_rounded_product_even, 31);
    const __m128i x_q31product_odd = _mm_srli_epi64(x_rounded_product_odd, 31);
    const __m128i y_q31product_even =
        _mm_srli_epi64(y_rounded_product_even, 31);
    const __m128i y_q31product_odd = _mm_srli_epi64(y_rounded_product_odd, 31);
    const __m128i z_q31product_even =
        _mm_srli_epi64(z_rounded_product_even, 31);
    const __m128i z_q31product_odd = _mm_srli_epi64(z_rounded_product_odd, 31);
    const __m128i w_q31product_even =
        _mm_srli_epi64(w_rounded_product_even, 31);
    const __m128i w_q31product_odd = _mm_srli_epi64(w_rounded_product_odd, 31);
    /* No PBLENDW before SSE4.1: gather the low dwords of even and odd
     * results with SHUFPS (-> order 0,2,1,3), then permute back. */
    const __m128i x_q31product_0213 = _mm_castps_si128(_mm_shuffle_ps(
        _mm_castsi128_ps(x_q31product_even),
        _mm_castsi128_ps(x_q31product_odd),
        _MM_SHUFFLE(2, 0, 2, 0)));
    const __m128i y_q31product_0213 = _mm_castps_si128(_mm_shuffle_ps(
        _mm_castsi128_ps(y_q31product_even),
        _mm_castsi128_ps(y_q31product_odd),
        _MM_SHUFFLE(2, 0, 2, 0)));
    const __m128i z_q31product_0213 = _mm_castps_si128(_mm_shuffle_ps(
        _mm_castsi128_ps(z_q31product_even),
        _mm_castsi128_ps(z_q31product_odd),
        _MM_SHUFFLE(2, 0, 2, 0)));
    const __m128i w_q31product_0213 = _mm_castps_si128(_mm_shuffle_ps(
        _mm_castsi128_ps(w_q31product_even),
        _mm_castsi128_ps(w_q31product_odd),
        _MM_SHUFFLE(2, 0, 2, 0)));
    const __m128i x_q31product =
        _mm_shuffle_epi32(x_q31product_0213, _MM_SHUFFLE(3, 1, 2, 0));
    const __m128i y_q31product =
        _mm_shuffle_epi32(y_q31product_0213, _MM_SHUFFLE(3, 1, 2, 0));
    const __m128i z_q31product =
        _mm_shuffle_epi32(z_q31product_0213, _MM_SHUFFLE(3, 1, 2, 0));
    const __m128i w_q31product =
        _mm_shuffle_epi32(w_q31product_0213, _MM_SHUFFLE(3, 1, 2, 0));
    /* Remainder of the upcoming arithmetic shift; for negative values the
     * cmpgt mask (-1) biases it so ties round away from zero. */
    const __m128i x_remainder = _mm_add_epi32(
        _mm_and_si128(x_q31product, vremainder_mask),
        _mm_cmpgt_epi32(_mm_setzero_si128(), x_q31product));
    const __m128i y_remainder = _mm_add_epi32(
        _mm_and_si128(y_q31product, vremainder_mask),
        _mm_cmpgt_epi32(_mm_setzero_si128(), y_q31product));
    const __m128i z_remainder = _mm_add_epi32(
        _mm_and_si128(z_q31product, vremainder_mask),
        _mm_cmpgt_epi32(_mm_setzero_si128(), z_q31product));
    const __m128i w_remainder = _mm_add_epi32(
        _mm_and_si128(w_q31product, vremainder_mask),
        _mm_cmpgt_epi32(_mm_setzero_si128(), w_q31product));
    /* Shift, then add 1 (subtract the all-ones mask) where the remainder
     * exceeds the rounding threshold. */
    const __m128i x_scaled = _mm_sub_epi32(
        _mm_sra_epi32(x_q31product, vshift),
        _mm_cmpgt_epi32(x_remainder, vthreshold));
    const __m128i y_scaled = _mm_sub_epi32(
        _mm_sra_epi32(y_q31product, vshift),
        _mm_cmpgt_epi32(y_remainder, vthreshold));
    const __m128i z_scaled = _mm_sub_epi32(
        _mm_sra_epi32(z_q31product, vshift),
        _mm_cmpgt_epi32(z_remainder, vthreshold));
    const __m128i w_scaled = _mm_sub_epi32(
        _mm_sra_epi32(w_q31product, vshift),
        _mm_cmpgt_epi32(w_remainder, vthreshold));
    /* Pack to int16 with saturation, add the output zero point, pack to
     * uint8 with saturation, and clamp to [qmin, qmax]. */
    const __m128i xy_packed =
        _mm_adds_epi16(_mm_packs_epi32(x_scaled, y_scaled), vzero_point);
    const __m128i zw_packed =
        _mm_adds_epi16(_mm_packs_epi32(z_scaled, w_scaled), vzero_point);
    const __m128i xyzw_packed = _mm_packus_epi16(xy_packed, zw_packed);
    const __m128i xyzw_clamped =
        _mm_max_epu8(_mm_min_epu8(xyzw_packed, vqmax), vqmin);
    /*
     * Instruction budget per iteration (16 values):
     * 16x PSHUFD
     * 4x SHUFPS
     * 8x PMULUDQ
     * 8x PXOR (setzero)
     * 8x PXOR
     * 4x PAND
     * 8x PADDQ
     * 4x PADDD
     * 2x PADDW
     * 8x PSUBQ
     * 4x PSUBD
     * 8x PSRLQ (immediate)
     * 4x PSRAD (register)
     * 12x PCMPGTD
     * 4x PABSD
     * 2x PACKSSDW
     * 1x PACKUSWB
     * 1x PMAXUB
     * 1x PMINUB
     * ---------------------
     * 107 instructions total
     */
    _mm_storeu_si128((__m128i*)output, xyzw_clamped);
    output += 16;
  }
}
| 9,943
| 40.606695
| 80
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/requantization/runtime-neon.h
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <arm_neon.h>
/*
 * Subtracts the per-tensor input zero point from 8 uint8 activations,
 * widening each lane to 16 bits (VSUBL: widen + subtract in one step).
 */
PYTORCH_QNNP_INLINE uint16x8_t
sub_zero_point(const uint8x8_t va, const uint8x8_t vzp) {
#if PYTORCH_QNNPACK_RUNTIME_QUANTIZATION
  // Run-time quantization
  return vsubl_u8(va, vzp);
#else
  // Design-time quantization: only widen; the zero point is presumably
  // already folded in elsewhere (NOTE(review): confirm against the
  // design-time packing path), so vzp is ignored here.
  return vmovl_u8(va);
#endif
}
| 509
| 21.173913
| 72
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/requantization/runtime-sse2.h
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <immintrin.h>
/*
 * Subtracts the per-tensor input zero point from activations that the
 * caller has ALREADY widened to 16-bit lanes (e.g. via unpacklo with
 * zero); operates on 8 lanes with _mm_sub_epi16.
 */
PYTORCH_QNNP_INLINE __m128i
sub_zero_point(const __m128i va, const __m128i vzp) {
#if PYTORCH_QNNPACK_RUNTIME_QUANTIZATION
  // Run-time quantization
  return _mm_sub_epi16(va, vzp);
#else
  // Design-time quantization (no-op): vzp is ignored; the zero point is
  // presumably folded in elsewhere (NOTE(review): confirm against the
  // design-time packing path).
  return va;
#endif
}
| 506
| 21.043478
| 72
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/sconv/6x8-psimd.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <psimd.h>
#include <qnnpack/sconv.h>
/*
 * fp32 convolution micro-kernel, 6 output rows x 8 output channels (PSIMD).
 *
 * a:  array of ks * mr input-row pointers; each of the ks kernel positions
 *     contributes 6 consecutive pointers (one per output row).
 * w:  packed weights; the first 8 floats seed the accumulators
 *     (presumably the bias, per the packed-weight layout — confirm against
 *     the packing code), followed by 8 weights per reduction step.
 * c:  output, mr rows of nr floats, rows c_stride bytes apart.
 * Results are clamped to [clamping_params->min, clamping_params->max].
 */
void pytorch_sconv_ukernel_6x8__psimd(
    size_t mr,
    size_t nr,
    size_t kc,
    size_t ks,
    const float** restrict a,
    const float* restrict w,
    float* restrict c,
    size_t c_stride,
    const struct pytorch_qnnp_fp32_clamping_params
        clamping_params[restrict static 1]) {
  /* Seed all 6 rows' accumulators from the first 8 packed floats. */
  psimd_f32 vacc0x0123 = psimd_load_f32(w);
  w += 4;
  psimd_f32 vacc0x4567 = psimd_load_f32(w);
  w += 4;
  psimd_f32 vacc1x0123 = vacc0x0123;
  psimd_f32 vacc1x4567 = vacc0x4567;
  psimd_f32 vacc2x0123 = vacc0x0123;
  psimd_f32 vacc2x4567 = vacc0x4567;
  psimd_f32 vacc3x0123 = vacc0x0123;
  psimd_f32 vacc3x4567 = vacc0x4567;
  psimd_f32 vacc4x0123 = vacc0x0123;
  psimd_f32 vacc4x4567 = vacc0x4567;
  psimd_f32 vacc5x0123 = vacc0x0123;
  psimd_f32 vacc5x4567 = vacc0x4567;
  do {
    /* Next kernel position: fetch the 6 row pointers for this tap. */
    const float* restrict a0 = *a++;
    const float* restrict a1 = *a++;
    const float* restrict a2 = *a++;
    const float* restrict a3 = *a++;
    const float* restrict a4 = *a++;
    const float* restrict a5 = *a++;
    size_t k = kc;
    do {
      /* One reduction step: broadcast one input per row, multiply by the
       * next 8 packed weights, accumulate into all 6x8 outputs. */
      const psimd_f32 va0 = psimd_splat_f32(*a0);
      a0 += 1;
      const psimd_f32 va1 = psimd_splat_f32(*a1);
      a1 += 1;
      const psimd_f32 va2 = psimd_splat_f32(*a2);
      a2 += 1;
      const psimd_f32 va3 = psimd_splat_f32(*a3);
      a3 += 1;
      const psimd_f32 va4 = psimd_splat_f32(*a4);
      a4 += 1;
      const psimd_f32 va5 = psimd_splat_f32(*a5);
      a5 += 1;
      const psimd_f32 vb0123 = psimd_load_f32(w);
      w += 4;
      const psimd_f32 vb4567 = psimd_load_f32(w);
      w += 4;
      vacc0x0123 += vb0123 * va0;
      vacc0x4567 += vb4567 * va0;
      vacc1x0123 += vb0123 * va1;
      vacc1x4567 += vb4567 * va1;
      vacc2x0123 += vb0123 * va2;
      vacc2x4567 += vb4567 * va2;
      vacc3x0123 += vb0123 * va3;
      vacc3x4567 += vb4567 * va3;
      vacc4x0123 += vb0123 * va4;
      vacc4x4567 += vb4567 * va4;
      vacc5x0123 += vb0123 * va5;
      vacc5x4567 += vb4567 * va5;
    } while (--k != 0);
  } while (--ks != 0);
  /* Clamp: min against max first, then max against min. */
  const psimd_f32 vmax = psimd_splat_f32(clamping_params->max);
  vacc0x0123 = psimd_min_f32(vacc0x0123, vmax);
  vacc0x4567 = psimd_min_f32(vacc0x4567, vmax);
  vacc1x0123 = psimd_min_f32(vacc1x0123, vmax);
  vacc1x4567 = psimd_min_f32(vacc1x4567, vmax);
  vacc2x0123 = psimd_min_f32(vacc2x0123, vmax);
  vacc2x4567 = psimd_min_f32(vacc2x4567, vmax);
  vacc3x0123 = psimd_min_f32(vacc3x0123, vmax);
  vacc3x4567 = psimd_min_f32(vacc3x4567, vmax);
  vacc4x0123 = psimd_min_f32(vacc4x0123, vmax);
  vacc4x4567 = psimd_min_f32(vacc4x4567, vmax);
  vacc5x0123 = psimd_min_f32(vacc5x0123, vmax);
  vacc5x4567 = psimd_min_f32(vacc5x4567, vmax);
  const psimd_f32 vmin = psimd_splat_f32(clamping_params->min);
  vacc0x0123 = psimd_max_f32(vacc0x0123, vmin);
  vacc0x4567 = psimd_max_f32(vacc0x4567, vmin);
  vacc1x0123 = psimd_max_f32(vacc1x0123, vmin);
  vacc1x4567 = psimd_max_f32(vacc1x4567, vmin);
  vacc2x0123 = psimd_max_f32(vacc2x0123, vmin);
  vacc2x4567 = psimd_max_f32(vacc2x4567, vmin);
  vacc3x0123 = psimd_max_f32(vacc3x0123, vmin);
  vacc3x4567 = psimd_max_f32(vacc3x4567, vmin);
  vacc4x0123 = psimd_max_f32(vacc4x0123, vmin);
  vacc4x4567 = psimd_max_f32(vacc4x4567, vmin);
  vacc5x0123 = psimd_max_f32(vacc5x0123, vmin);
  vacc5x4567 = psimd_max_f32(vacc5x4567, vmin);
  /* Rows beyond mr alias the last valid row, so their stores just repeat
   * writes to memory that is written anyway — no branches in the stores. */
  float* c0 = c;
  float* c1 = (float*)((uintptr_t)c0 + c_stride);
  if (mr < 2) {
    c1 = c0;
  }
  float* c2 = (float*)((uintptr_t)c1 + c_stride);
  if (mr <= 2) {
    c2 = c1;
  }
  float* c3 = (float*)((uintptr_t)c2 + c_stride);
  if (mr < 4) {
    c3 = c2;
  }
  float* c4 = (float*)((uintptr_t)c3 + c_stride);
  if (mr <= 4) {
    c4 = c3;
  }
  float* c5 = (float*)((uintptr_t)c4 + c_stride);
  if (mr != 6) {
    c5 = c4;
  }
  if (nr == 8) {
    /* Full-width store: two 4-wide stores per row. */
    psimd_store_f32(c0, vacc0x0123);
    c0 += 4;
    psimd_store_f32(c1, vacc1x0123);
    c1 += 4;
    psimd_store_f32(c2, vacc2x0123);
    c2 += 4;
    psimd_store_f32(c3, vacc3x0123);
    c3 += 4;
    psimd_store_f32(c4, vacc4x0123);
    c4 += 4;
    psimd_store_f32(c5, vacc5x0123);
    c5 += 4;
    psimd_store_f32(c0, vacc0x4567);
    psimd_store_f32(c1, vacc1x4567);
    psimd_store_f32(c2, vacc2x4567);
    psimd_store_f32(c3, vacc3x4567);
    psimd_store_f32(c4, vacc4x4567);
    psimd_store_f32(c5, vacc5x4567);
  } else {
    /* Partial-width store: binary decomposition of nr (4, then 2, then 1),
     * shifting remaining lanes down into the low positions after each step. */
    if (nr >= 4) {
      psimd_store_f32(c0, vacc0x0123);
      c0 += 4;
      psimd_store_f32(c1, vacc1x0123);
      c1 += 4;
      psimd_store_f32(c2, vacc2x0123);
      c2 += 4;
      psimd_store_f32(c3, vacc3x0123);
      c3 += 4;
      psimd_store_f32(c4, vacc4x0123);
      c4 += 4;
      psimd_store_f32(c5, vacc5x0123);
      c5 += 4;
      vacc0x0123 = vacc0x4567;
      vacc1x0123 = vacc1x4567;
      vacc2x0123 = vacc2x4567;
      vacc3x0123 = vacc3x4567;
      vacc4x0123 = vacc4x4567;
      vacc5x0123 = vacc5x4567;
      nr -= 4;
    }
    if (nr >= 2) {
      psimd_store2_f32(c0, vacc0x0123);
      c0 += 2;
      psimd_store2_f32(c1, vacc1x0123);
      c1 += 2;
      psimd_store2_f32(c2, vacc2x0123);
      c2 += 2;
      psimd_store2_f32(c3, vacc3x0123);
      c3 += 2;
      psimd_store2_f32(c4, vacc4x0123);
      c4 += 2;
      psimd_store2_f32(c5, vacc5x0123);
      c5 += 2;
      /* Move lanes 2,3 down to lanes 0,1. */
      vacc0x0123 = psimd_concat_hi_f32(vacc0x0123, vacc0x0123);
      vacc1x0123 = psimd_concat_hi_f32(vacc1x0123, vacc1x0123);
      vacc2x0123 = psimd_concat_hi_f32(vacc2x0123, vacc2x0123);
      vacc3x0123 = psimd_concat_hi_f32(vacc3x0123, vacc3x0123);
      vacc4x0123 = psimd_concat_hi_f32(vacc4x0123, vacc4x0123);
      vacc5x0123 = psimd_concat_hi_f32(vacc5x0123, vacc5x0123);
      nr -= 2;
    }
    if (nr != 0) {
      psimd_store1_f32(c0, vacc0x0123);
      psimd_store1_f32(c1, vacc1x0123);
      psimd_store1_f32(c2, vacc2x0123);
      psimd_store1_f32(c3, vacc3x0123);
      psimd_store1_f32(c4, vacc4x0123);
      psimd_store1_f32(c5, vacc5x0123);
    }
  }
}
| 6,122
| 29.014706
| 72
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/sdwconv/up4x9-psimd.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <psimd.h>
#include <qnnpack/sdwconv.h>
/*
 * fp32 depthwise-convolution micro-kernel, 9 taps, 4 channels at a time
 * ("up4x9", PSIMD).
 *
 * For each output pixel: input supplies 9 row pointers (one per tap);
 * weights are packed per 4-channel group as 8 floats of accumulator seed
 * (presumably the bias — confirm against the packing code) followed by
 * 9 x 4 tap weights (group stride: 44 floats). Results are clamped to
 * [clamping_params->min, clamping_params->max].
 *
 * BUG FIXES versus the previous revision:
 *  1. The main loop never advanced `output`, so every 4-channel group
 *     overwrote the same four floats.
 *  2. The remainder path rewound the input/output pointers by c bytes and
 *     did a full 4-wide store; that mis-addressed the tail (writing past
 *     the row for c == 1, skipping channels for c == 3) and misaligned the
 *     inputs with the packed tail weights, which are never rewound.
 *     The tail is now computed un-rewound (lanes 0..c-1 line up with the
 *     packed weights) and exactly c lanes are stored.
 */
void pytorch_sdwconv_ukernel_up4x9__psimd(
    size_t channels,
    size_t output_width,
    const float** input,
    const float* weights,
    float* output,
    size_t input_stride,
    size_t output_increment,
    const struct pytorch_qnnp_fp32_clamping_params
        clamping_params[restrict static 1]) {
  const psimd_f32 vmax = psimd_splat_f32(clamping_params->max);
  const psimd_f32 vmin = psimd_splat_f32(clamping_params->min);
  do {
    /* Nine tap rows for this output pixel. */
    const float* i0 = input[0];
    const float* i1 = input[1];
    const float* i2 = input[2];
    const float* i3 = input[3];
    const float* i4 = input[4];
    const float* i5 = input[5];
    const float* i6 = input[6];
    const float* i7 = input[7];
    const float* i8 = input[8];
    input = (const float**)((uintptr_t)input + input_stride);
    size_t c = channels;
    const float* w = weights;
    for (; c >= 4; c -= 4) {
      /* w[0..3] seeds the accumulator; taps follow at w + 8 .. w + 40. */
      psimd_f32 vacc = psimd_load_f32(w);
      const psimd_f32 vi0 = psimd_load_f32(i0);
      i0 += 4;
      const psimd_f32 vk0 = psimd_load_f32(w + 8);
      vacc += vi0 * vk0;
      const psimd_f32 vi1 = psimd_load_f32(i1);
      i1 += 4;
      const psimd_f32 vk1 = psimd_load_f32(w + 12);
      /* Second accumulator chain reduces dependency latency. */
      psimd_f32 vacc2 = vi1 * vk1;
      const psimd_f32 vi2 = psimd_load_f32(i2);
      i2 += 4;
      const psimd_f32 vk2 = psimd_load_f32(w + 16);
      vacc += vi2 * vk2;
      const psimd_f32 vi3 = psimd_load_f32(i3);
      i3 += 4;
      const psimd_f32 vk3 = psimd_load_f32(w + 20);
      vacc2 += vi3 * vk3;
      const psimd_f32 vi4 = psimd_load_f32(i4);
      i4 += 4;
      const psimd_f32 vk4 = psimd_load_f32(w + 24);
      vacc += vi4 * vk4;
      const psimd_f32 vi5 = psimd_load_f32(i5);
      i5 += 4;
      const psimd_f32 vk5 = psimd_load_f32(w + 28);
      vacc2 += vi5 * vk5;
      const psimd_f32 vi6 = psimd_load_f32(i6);
      i6 += 4;
      const psimd_f32 vk6 = psimd_load_f32(w + 32);
      vacc += vi6 * vk6;
      const psimd_f32 vi7 = psimd_load_f32(i7);
      i7 += 4;
      const psimd_f32 vk7 = psimd_load_f32(w + 36);
      vacc2 += vi7 * vk7;
      const psimd_f32 vi8 = psimd_load_f32(i8);
      i8 += 4;
      const psimd_f32 vk8 = psimd_load_f32(w + 40);
      vacc += vi8 * vk8;
      vacc += vacc2;
      vacc = psimd_min_f32(vacc, vmax);
      vacc = psimd_max_f32(vacc, vmin);
      psimd_store_f32(output, vacc);
      output += 4; /* fix #1: advance past the stored group */
      w += 44;
    }
    if (c != 0) {
      /* Remainder (1-3 channels): lanes 0..c-1 of the un-rewound loads
       * line up with the packed tail weights.
       * NOTE(review): the 4-wide loads may read up to 3 floats past the
       * last channel, as the previous revision's loads also did; this
       * presumably relies on padded buffers — confirm. */
      psimd_f32 vacc = psimd_load_f32(w);
      const psimd_f32 vi0 = psimd_load_f32(i0);
      const psimd_f32 vk0 = psimd_load_f32(w + 8);
      vacc += vi0 * vk0;
      const psimd_f32 vi1 = psimd_load_f32(i1);
      const psimd_f32 vk1 = psimd_load_f32(w + 12);
      psimd_f32 vacc2 = vi1 * vk1;
      const psimd_f32 vi2 = psimd_load_f32(i2);
      const psimd_f32 vk2 = psimd_load_f32(w + 16);
      vacc += vi2 * vk2;
      const psimd_f32 vi3 = psimd_load_f32(i3);
      const psimd_f32 vk3 = psimd_load_f32(w + 20);
      vacc2 += vi3 * vk3;
      const psimd_f32 vi4 = psimd_load_f32(i4);
      const psimd_f32 vk4 = psimd_load_f32(w + 24);
      vacc += vi4 * vk4;
      const psimd_f32 vi5 = psimd_load_f32(i5);
      const psimd_f32 vk5 = psimd_load_f32(w + 28);
      vacc2 += vi5 * vk5;
      const psimd_f32 vi6 = psimd_load_f32(i6);
      const psimd_f32 vk6 = psimd_load_f32(w + 32);
      vacc += vi6 * vk6;
      const psimd_f32 vi7 = psimd_load_f32(i7);
      const psimd_f32 vk7 = psimd_load_f32(w + 36);
      vacc2 += vi7 * vk7;
      const psimd_f32 vi8 = psimd_load_f32(i8);
      const psimd_f32 vk8 = psimd_load_f32(w + 40);
      vacc += vi8 * vk8;
      vacc += vacc2;
      vacc = psimd_min_f32(vacc, vmax);
      vacc = psimd_max_f32(vacc, vmin);
      /* fix #2: store exactly c lanes (2, then 1), shifting the remaining
       * lanes down — the same tail idiom as the 6x8 sconv kernel. */
      if (c & 2) {
        psimd_store2_f32(output, vacc);
        output += 2;
        vacc = psimd_concat_hi_f32(vacc, vacc);
      }
      if (c & 1) {
        psimd_store1_f32(output, vacc);
        output += 1;
      }
    }
    /* output now points one past this pixel's last channel. */
    output = (float*)((uintptr_t)output + output_increment);
  } while (--output_width != 0);
}
| 4,573
| 28.133758
| 72
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/sgemm/5x8-neon.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <arm_neon.h>
#include <qnnpack/sgemm.h>
/*
 * SGEMM micro-kernel: computes a 5x8 (mr x nr) output tile of C from up to
 * five rows of A and a sequentially-read packed buffer w, then clamps every
 * result to [clamping_params->min, clamping_params->max].
 *
 * mr (<= 5) and nr (<= 8) are the valid tile dimensions; k is the reduction
 * depth. a and c are row-major with byte strides a_stride / c_stride. w is
 * consumed in order: 8 initial accumulator values, then 8 weights per depth
 * step.
 */
void pytorch_sgemm_ukernel_5x8__neon(
    size_t mr,
    size_t nr,
    size_t k,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t c_stride,
    const struct pytorch_qnnp_fp32_clamping_params
        clamping_params[restrict static 1]) {
  /* Accumulators start from the first 8 floats of w (presumably the bias
   * packed ahead of the weights -- verify against the weight packer). All
   * five rows share the same initial values. */
  float32x4_t vacc0x0123 = vld1q_f32(w);
  w += 4;
  float32x4_t vacc0x4567 = vld1q_f32(w);
  w += 4;
  float32x4_t vacc1x0123 = vacc0x0123;
  float32x4_t vacc1x4567 = vacc0x4567;
  float32x4_t vacc2x0123 = vacc0x0123;
  float32x4_t vacc2x4567 = vacc0x4567;
  float32x4_t vacc3x0123 = vacc0x0123;
  float32x4_t vacc3x4567 = vacc0x4567;
  float32x4_t vacc4x0123 = vacc0x0123;
  float32x4_t vacc4x4567 = vacc0x4567;
  /* Row pointers for A: rows beyond mr alias the last valid row so every
   * load stays in bounds; their results are computed but never stored. */
  const float* a0 = a;
  const float* a1 = (const float*)((uintptr_t)a0 + a_stride);
  if (mr < 2) {
    a1 = a0;
  }
  const float* a2 = (const float*)((uintptr_t)a1 + a_stride);
  if (mr <= 2) {
    a2 = a1;
  }
  const float* a3 = (const float*)((uintptr_t)a2 + a_stride);
  if (mr < 4) {
    a3 = a2;
  }
  const float* a4 = (const float*)((uintptr_t)a3 + a_stride);
  if (mr <= 4) {
    a4 = a3;
  }
  /* Main loop: consume depth in pairs. Each row loads 2 consecutive A
   * elements once and broadcasts them per depth step via lane selection. */
  for (; k >= 2; k -= 2) {
    const float32x2_t va0 = vld1_f32(a0);
    a0 += 2;
    const float32x2_t va1 = vld1_f32(a1);
    a1 += 2;
    const float32x2_t va2 = vld1_f32(a2);
    a2 += 2;
    const float32x2_t va3 = vld1_f32(a3);
    a3 += 2;
    const float32x2_t va4 = vld1_f32(a4);
    a4 += 2;
    /* Depth step 0: multiply the 8 weights by lane 0 of each A pair.
     * Fused multiply-add (vfma) on AArch64, vmla on AArch32. */
    {
      const float32x4_t vb0123 = vld1q_f32(w);
      w += 4;
      const float32x4_t vb4567 = vld1q_f32(w);
      w += 4;
#if defined(__aarch64__)
      vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123, va0, 0);
      vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567, va0, 0);
      vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123, va1, 0);
      vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567, va1, 0);
      vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123, va2, 0);
      vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567, va2, 0);
      vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123, va3, 0);
      vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567, va3, 0);
      vacc4x0123 = vfmaq_lane_f32(vacc4x0123, vb0123, va4, 0);
      vacc4x4567 = vfmaq_lane_f32(vacc4x4567, vb4567, va4, 0);
#else
      vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123, va0, 0);
      vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567, va0, 0);
      vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123, va1, 0);
      vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567, va1, 0);
      vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123, va2, 0);
      vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567, va2, 0);
      vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123, va3, 0);
      vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567, va3, 0);
      vacc4x0123 = vmlaq_lane_f32(vacc4x0123, vb0123, va4, 0);
      vacc4x4567 = vmlaq_lane_f32(vacc4x4567, vb4567, va4, 0);
#endif
    }
    /* Depth step 1: same with lane 1 and the next 8 weights. */
    {
      const float32x4_t vb0123 = vld1q_f32(w);
      w += 4;
      const float32x4_t vb4567 = vld1q_f32(w);
      w += 4;
#if defined(__aarch64__)
      vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123, va0, 1);
      vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567, va0, 1);
      vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123, va1, 1);
      vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567, va1, 1);
      vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123, va2, 1);
      vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567, va2, 1);
      vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123, va3, 1);
      vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567, va3, 1);
      vacc4x0123 = vfmaq_lane_f32(vacc4x0123, vb0123, va4, 1);
      vacc4x4567 = vfmaq_lane_f32(vacc4x4567, vb4567, va4, 1);
#else
      vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123, va0, 1);
      vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567, va0, 1);
      vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123, va1, 1);
      vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567, va1, 1);
      vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123, va2, 1);
      vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567, va2, 1);
      vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123, va3, 1);
      vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567, va3, 1);
      vacc4x0123 = vmlaq_lane_f32(vacc4x0123, vb0123, va4, 1);
      vacc4x4567 = vmlaq_lane_f32(vacc4x4567, vb4567, va4, 1);
#endif
    }
  }
  /* Remainder: a single odd depth step, one scalar A element per row
   * splatted across a full vector. */
  if (k != 0) {
    const float32x4_t va0 = vld1q_dup_f32(a0);
    const float32x4_t va1 = vld1q_dup_f32(a1);
    const float32x4_t va2 = vld1q_dup_f32(a2);
    const float32x4_t va3 = vld1q_dup_f32(a3);
    const float32x4_t va4 = vld1q_dup_f32(a4);
    const float32x4_t vb0123 = vld1q_f32(w);
    w += 4;
    const float32x4_t vb4567 = vld1q_f32(w);
    w += 4;
#if defined(__aarch64__)
    vacc0x0123 = vfmaq_f32(vacc0x0123, vb0123, va0);
    vacc0x4567 = vfmaq_f32(vacc0x4567, vb4567, va0);
    vacc1x0123 = vfmaq_f32(vacc1x0123, vb0123, va1);
    vacc1x4567 = vfmaq_f32(vacc1x4567, vb4567, va1);
    vacc2x0123 = vfmaq_f32(vacc2x0123, vb0123, va2);
    vacc2x4567 = vfmaq_f32(vacc2x4567, vb4567, va2);
    vacc3x0123 = vfmaq_f32(vacc3x0123, vb0123, va3);
    vacc3x4567 = vfmaq_f32(vacc3x4567, vb4567, va3);
    vacc4x0123 = vfmaq_f32(vacc4x0123, vb0123, va4);
    vacc4x4567 = vfmaq_f32(vacc4x4567, vb4567, va4);
#else
    vacc0x0123 = vmlaq_f32(vacc0x0123, vb0123, va0);
    vacc0x4567 = vmlaq_f32(vacc0x4567, vb4567, va0);
    vacc1x0123 = vmlaq_f32(vacc1x0123, vb0123, va1);
    vacc1x4567 = vmlaq_f32(vacc1x4567, vb4567, va1);
    vacc2x0123 = vmlaq_f32(vacc2x0123, vb0123, va2);
    vacc2x4567 = vmlaq_f32(vacc2x4567, vb4567, va2);
    vacc3x0123 = vmlaq_f32(vacc3x0123, vb0123, va3);
    vacc3x4567 = vmlaq_f32(vacc3x4567, vb4567, va3);
    vacc4x0123 = vmlaq_f32(vacc4x0123, vb0123, va4);
    vacc4x4567 = vmlaq_f32(vacc4x4567, vb4567, va4);
#endif
  }
  /* Clamp all accumulators to the configured output range. */
  const float32x4_t vmax = vld1q_dup_f32(&clamping_params->max);
  vacc0x0123 = vminq_f32(vacc0x0123, vmax);
  vacc0x4567 = vminq_f32(vacc0x4567, vmax);
  vacc1x0123 = vminq_f32(vacc1x0123, vmax);
  vacc1x4567 = vminq_f32(vacc1x4567, vmax);
  vacc2x0123 = vminq_f32(vacc2x0123, vmax);
  vacc2x4567 = vminq_f32(vacc2x4567, vmax);
  vacc3x0123 = vminq_f32(vacc3x0123, vmax);
  vacc3x4567 = vminq_f32(vacc3x4567, vmax);
  vacc4x0123 = vminq_f32(vacc4x0123, vmax);
  vacc4x4567 = vminq_f32(vacc4x4567, vmax);
  const float32x4_t vmin = vld1q_dup_f32(&clamping_params->min);
  vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
  vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
  vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
  vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
  vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
  vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
  vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
  vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
  vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
  vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
  /* Output row pointers, clamped the same way as the A row pointers so
   * out-of-range rows write on top of a valid row (harmlessly duplicated). */
  float* c0 = c;
  float* c1 = (float*)((uintptr_t)c0 + c_stride);
  if (mr < 2) {
    c1 = c0;
  }
  float* c2 = (float*)((uintptr_t)c1 + c_stride);
  if (mr <= 2) {
    c2 = c1;
  }
  float* c3 = (float*)((uintptr_t)c2 + c_stride);
  if (mr < 4) {
    c3 = c2;
  }
  float* c4 = (float*)((uintptr_t)c3 + c_stride);
  if (mr <= 4) {
    c4 = c3;
  }
  if (nr == 8) {
    /* Full-width tile: two 4-wide stores per row. */
    vst1q_f32(c0, vacc0x0123);
    c0 += 4;
    vst1q_f32(c1, vacc1x0123);
    c1 += 4;
    vst1q_f32(c2, vacc2x0123);
    c2 += 4;
    vst1q_f32(c3, vacc3x0123);
    c3 += 4;
    vst1q_f32(c4, vacc4x0123);
    c4 += 4;
    vst1q_f32(c0, vacc0x4567);
    vst1q_f32(c1, vacc1x4567);
    vst1q_f32(c2, vacc2x4567);
    vst1q_f32(c3, vacc3x4567);
    vst1q_f32(c4, vacc4x4567);
  } else {
    /* Partial tile: peel off 4-, 2-, then 1-column stores, shifting the
     * remaining lanes down after each step. */
    if (nr >= 4) {
      vst1q_f32(c0, vacc0x0123);
      c0 += 4;
      vst1q_f32(c1, vacc1x0123);
      c1 += 4;
      vst1q_f32(c2, vacc2x0123);
      c2 += 4;
      vst1q_f32(c3, vacc3x0123);
      c3 += 4;
      vst1q_f32(c4, vacc4x0123);
      c4 += 4;
      vacc0x0123 = vacc0x4567;
      vacc1x0123 = vacc1x4567;
      vacc2x0123 = vacc2x4567;
      vacc3x0123 = vacc3x4567;
      vacc4x0123 = vacc4x4567;
      nr -= 4;
    }
    if (nr >= 2) {
      vst1_f32(c0, vget_low_f32(vacc0x0123));
      c0 += 2;
      vst1_f32(c1, vget_low_f32(vacc1x0123));
      c1 += 2;
      vst1_f32(c2, vget_low_f32(vacc2x0123));
      c2 += 2;
      vst1_f32(c3, vget_low_f32(vacc3x0123));
      c3 += 2;
      vst1_f32(c4, vget_low_f32(vacc4x0123));
      c4 += 2;
      /* Rotate the upper half of each accumulator into the lower lanes. */
      vacc0x0123 = vextq_f32(vacc0x0123, vacc0x0123, 2);
      vacc1x0123 = vextq_f32(vacc1x0123, vacc1x0123, 2);
      vacc2x0123 = vextq_f32(vacc2x0123, vacc2x0123, 2);
      vacc3x0123 = vextq_f32(vacc3x0123, vacc3x0123, 2);
      vacc4x0123 = vextq_f32(vacc4x0123, vacc4x0123, 2);
      nr -= 2;
    }
    if (nr != 0) {
      vst1q_lane_f32(c0, vacc0x0123, 0);
      vst1q_lane_f32(c1, vacc1x0123, 0);
      vst1q_lane_f32(c2, vacc2x0123, 0);
      vst1q_lane_f32(c3, vacc3x0123, 0);
      vst1q_lane_f32(c4, vacc4x0123, 0);
    }
  }
}
| 9,104
| 32.847584
| 72
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/sgemm/6x8-neon.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <arm_neon.h>
#include <qnnpack/sgemm.h>
/*
 * SGEMM micro-kernel: computes a 6x8 (mr x nr) output tile of C from up to
 * six rows of A and a sequentially-read packed buffer w, then clamps every
 * result to [clamping_params->min, clamping_params->max].
 *
 * mr (<= 6) and nr (<= 8) are the valid tile dimensions; k is the reduction
 * depth. a and c are row-major with byte strides a_stride / c_stride. w is
 * consumed in order: 8 initial accumulator values, then 8 weights per depth
 * step.
 */
void pytorch_sgemm_ukernel_6x8__neon(
    size_t mr,
    size_t nr,
    size_t k,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t c_stride,
    const struct pytorch_qnnp_fp32_clamping_params
        clamping_params[restrict static 1]) {
  /* Accumulators start from the first 8 floats of w (presumably the bias
   * packed ahead of the weights -- verify against the weight packer). All
   * six rows share the same initial values. */
  float32x4_t vacc0x0123 = vld1q_f32(w);
  w += 4;
  float32x4_t vacc0x4567 = vld1q_f32(w);
  w += 4;
  float32x4_t vacc1x0123 = vacc0x0123;
  float32x4_t vacc1x4567 = vacc0x4567;
  float32x4_t vacc2x0123 = vacc0x0123;
  float32x4_t vacc2x4567 = vacc0x4567;
  float32x4_t vacc3x0123 = vacc0x0123;
  float32x4_t vacc3x4567 = vacc0x4567;
  float32x4_t vacc4x0123 = vacc0x0123;
  float32x4_t vacc4x4567 = vacc0x4567;
  float32x4_t vacc5x0123 = vacc0x0123;
  float32x4_t vacc5x4567 = vacc0x4567;
  /* Row pointers for A: rows beyond mr alias the last valid row so every
   * load stays in bounds; their results are computed but never stored. */
  const float* a0 = a;
  const float* a1 = (const float*)((uintptr_t)a0 + a_stride);
  if (mr < 2) {
    a1 = a0;
  }
  const float* a2 = (const float*)((uintptr_t)a1 + a_stride);
  if (mr <= 2) {
    a2 = a1;
  }
  const float* a3 = (const float*)((uintptr_t)a2 + a_stride);
  if (mr < 4) {
    a3 = a2;
  }
  const float* a4 = (const float*)((uintptr_t)a3 + a_stride);
  if (mr <= 4) {
    a4 = a3;
  }
  const float* a5 = (const float*)((uintptr_t)a4 + a_stride);
  if (mr != 6) {
    a5 = a4;
  }
  /* Main loop: consume depth in pairs. Each row loads 2 consecutive A
   * elements once and broadcasts them per depth step via lane selection. */
  for (; k >= 2; k -= 2) {
    const float32x2_t va0 = vld1_f32(a0);
    a0 += 2;
    const float32x2_t va1 = vld1_f32(a1);
    a1 += 2;
    const float32x2_t va2 = vld1_f32(a2);
    a2 += 2;
    const float32x2_t va3 = vld1_f32(a3);
    a3 += 2;
    const float32x2_t va4 = vld1_f32(a4);
    a4 += 2;
    const float32x2_t va5 = vld1_f32(a5);
    a5 += 2;
    /* Depth step 0: multiply the 8 weights by lane 0 of each A pair.
     * Fused multiply-add (vfma) on AArch64, vmla on AArch32. */
    {
      const float32x4_t vb0123 = vld1q_f32(w);
      w += 4;
      const float32x4_t vb4567 = vld1q_f32(w);
      w += 4;
#if defined(__aarch64__)
      vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123, va0, 0);
      vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567, va0, 0);
      vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123, va1, 0);
      vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567, va1, 0);
      vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123, va2, 0);
      vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567, va2, 0);
      vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123, va3, 0);
      vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567, va3, 0);
      vacc4x0123 = vfmaq_lane_f32(vacc4x0123, vb0123, va4, 0);
      vacc4x4567 = vfmaq_lane_f32(vacc4x4567, vb4567, va4, 0);
      vacc5x0123 = vfmaq_lane_f32(vacc5x0123, vb0123, va5, 0);
      vacc5x4567 = vfmaq_lane_f32(vacc5x4567, vb4567, va5, 0);
#else
      vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123, va0, 0);
      vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567, va0, 0);
      vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123, va1, 0);
      vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567, va1, 0);
      vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123, va2, 0);
      vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567, va2, 0);
      vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123, va3, 0);
      vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567, va3, 0);
      vacc4x0123 = vmlaq_lane_f32(vacc4x0123, vb0123, va4, 0);
      vacc4x4567 = vmlaq_lane_f32(vacc4x4567, vb4567, va4, 0);
      vacc5x0123 = vmlaq_lane_f32(vacc5x0123, vb0123, va5, 0);
      vacc5x4567 = vmlaq_lane_f32(vacc5x4567, vb4567, va5, 0);
#endif
    }
    /* Depth step 1: same with lane 1 and the next 8 weights. */
    {
      const float32x4_t vb0123 = vld1q_f32(w);
      w += 4;
      const float32x4_t vb4567 = vld1q_f32(w);
      w += 4;
#if defined(__aarch64__)
      vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123, va0, 1);
      vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567, va0, 1);
      vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123, va1, 1);
      vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567, va1, 1);
      vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123, va2, 1);
      vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567, va2, 1);
      vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123, va3, 1);
      vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567, va3, 1);
      vacc4x0123 = vfmaq_lane_f32(vacc4x0123, vb0123, va4, 1);
      vacc4x4567 = vfmaq_lane_f32(vacc4x4567, vb4567, va4, 1);
      vacc5x0123 = vfmaq_lane_f32(vacc5x0123, vb0123, va5, 1);
      vacc5x4567 = vfmaq_lane_f32(vacc5x4567, vb4567, va5, 1);
#else
      vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123, va0, 1);
      vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567, va0, 1);
      vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123, va1, 1);
      vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567, va1, 1);
      vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123, va2, 1);
      vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567, va2, 1);
      vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123, va3, 1);
      vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567, va3, 1);
      vacc4x0123 = vmlaq_lane_f32(vacc4x0123, vb0123, va4, 1);
      vacc4x4567 = vmlaq_lane_f32(vacc4x4567, vb4567, va4, 1);
      vacc5x0123 = vmlaq_lane_f32(vacc5x0123, vb0123, va5, 1);
      vacc5x4567 = vmlaq_lane_f32(vacc5x4567, vb4567, va5, 1);
#endif
    }
  }
  /* Remainder: a single odd depth step, one scalar A element per row
   * splatted across a full vector. */
  if (k != 0) {
    const float32x4_t va0 = vld1q_dup_f32(a0);
    const float32x4_t va1 = vld1q_dup_f32(a1);
    const float32x4_t va2 = vld1q_dup_f32(a2);
    const float32x4_t va3 = vld1q_dup_f32(a3);
    const float32x4_t va4 = vld1q_dup_f32(a4);
    const float32x4_t va5 = vld1q_dup_f32(a5);
    const float32x4_t vb0123 = vld1q_f32(w);
    w += 4;
    const float32x4_t vb4567 = vld1q_f32(w);
    w += 4;
#if defined(__aarch64__)
    vacc0x0123 = vfmaq_f32(vacc0x0123, vb0123, va0);
    vacc0x4567 = vfmaq_f32(vacc0x4567, vb4567, va0);
    vacc1x0123 = vfmaq_f32(vacc1x0123, vb0123, va1);
    vacc1x4567 = vfmaq_f32(vacc1x4567, vb4567, va1);
    vacc2x0123 = vfmaq_f32(vacc2x0123, vb0123, va2);
    vacc2x4567 = vfmaq_f32(vacc2x4567, vb4567, va2);
    vacc3x0123 = vfmaq_f32(vacc3x0123, vb0123, va3);
    vacc3x4567 = vfmaq_f32(vacc3x4567, vb4567, va3);
    vacc4x0123 = vfmaq_f32(vacc4x0123, vb0123, va4);
    vacc4x4567 = vfmaq_f32(vacc4x4567, vb4567, va4);
    vacc5x0123 = vfmaq_f32(vacc5x0123, vb0123, va5);
    vacc5x4567 = vfmaq_f32(vacc5x4567, vb4567, va5);
#else
    vacc0x0123 = vmlaq_f32(vacc0x0123, vb0123, va0);
    vacc0x4567 = vmlaq_f32(vacc0x4567, vb4567, va0);
    vacc1x0123 = vmlaq_f32(vacc1x0123, vb0123, va1);
    vacc1x4567 = vmlaq_f32(vacc1x4567, vb4567, va1);
    vacc2x0123 = vmlaq_f32(vacc2x0123, vb0123, va2);
    vacc2x4567 = vmlaq_f32(vacc2x4567, vb4567, va2);
    vacc3x0123 = vmlaq_f32(vacc3x0123, vb0123, va3);
    vacc3x4567 = vmlaq_f32(vacc3x4567, vb4567, va3);
    vacc4x0123 = vmlaq_f32(vacc4x0123, vb0123, va4);
    vacc4x4567 = vmlaq_f32(vacc4x4567, vb4567, va4);
    vacc5x0123 = vmlaq_f32(vacc5x0123, vb0123, va5);
    vacc5x4567 = vmlaq_f32(vacc5x4567, vb4567, va5);
#endif
  }
  /* Clamp all accumulators to the configured output range. */
  const float32x4_t vmax = vld1q_dup_f32(&clamping_params->max);
  vacc0x0123 = vminq_f32(vacc0x0123, vmax);
  vacc0x4567 = vminq_f32(vacc0x4567, vmax);
  vacc1x0123 = vminq_f32(vacc1x0123, vmax);
  vacc1x4567 = vminq_f32(vacc1x4567, vmax);
  vacc2x0123 = vminq_f32(vacc2x0123, vmax);
  vacc2x4567 = vminq_f32(vacc2x4567, vmax);
  vacc3x0123 = vminq_f32(vacc3x0123, vmax);
  vacc3x4567 = vminq_f32(vacc3x4567, vmax);
  vacc4x0123 = vminq_f32(vacc4x0123, vmax);
  vacc4x4567 = vminq_f32(vacc4x4567, vmax);
  vacc5x0123 = vminq_f32(vacc5x0123, vmax);
  vacc5x4567 = vminq_f32(vacc5x4567, vmax);
  const float32x4_t vmin = vld1q_dup_f32(&clamping_params->min);
  vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
  vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
  vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
  vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
  vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
  vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
  vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
  vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
  vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
  vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
  vacc5x0123 = vmaxq_f32(vacc5x0123, vmin);
  vacc5x4567 = vmaxq_f32(vacc5x4567, vmin);
  /* Output row pointers, clamped the same way as the A row pointers so
   * out-of-range rows write on top of a valid row (harmlessly duplicated). */
  float* c0 = c;
  float* c1 = (float*)((uintptr_t)c0 + c_stride);
  if (mr < 2) {
    c1 = c0;
  }
  float* c2 = (float*)((uintptr_t)c1 + c_stride);
  if (mr <= 2) {
    c2 = c1;
  }
  float* c3 = (float*)((uintptr_t)c2 + c_stride);
  if (mr < 4) {
    c3 = c2;
  }
  float* c4 = (float*)((uintptr_t)c3 + c_stride);
  if (mr <= 4) {
    c4 = c3;
  }
  float* c5 = (float*)((uintptr_t)c4 + c_stride);
  if (mr != 6) {
    c5 = c4;
  }
  if (nr == 8) {
    /* Full-width tile: two 4-wide stores per row. */
    vst1q_f32(c0, vacc0x0123);
    c0 += 4;
    vst1q_f32(c1, vacc1x0123);
    c1 += 4;
    vst1q_f32(c2, vacc2x0123);
    c2 += 4;
    vst1q_f32(c3, vacc3x0123);
    c3 += 4;
    vst1q_f32(c4, vacc4x0123);
    c4 += 4;
    vst1q_f32(c5, vacc5x0123);
    c5 += 4;
    vst1q_f32(c0, vacc0x4567);
    vst1q_f32(c1, vacc1x4567);
    vst1q_f32(c2, vacc2x4567);
    vst1q_f32(c3, vacc3x4567);
    vst1q_f32(c4, vacc4x4567);
    vst1q_f32(c5, vacc5x4567);
  } else {
    /* Partial tile: peel off 4-, 2-, then 1-column stores, shifting the
     * remaining lanes down after each step. */
    if (nr >= 4) {
      vst1q_f32(c0, vacc0x0123);
      c0 += 4;
      vst1q_f32(c1, vacc1x0123);
      c1 += 4;
      vst1q_f32(c2, vacc2x0123);
      c2 += 4;
      vst1q_f32(c3, vacc3x0123);
      c3 += 4;
      vst1q_f32(c4, vacc4x0123);
      c4 += 4;
      vst1q_f32(c5, vacc5x0123);
      c5 += 4;
      vacc0x0123 = vacc0x4567;
      vacc1x0123 = vacc1x4567;
      vacc2x0123 = vacc2x4567;
      vacc3x0123 = vacc3x4567;
      vacc4x0123 = vacc4x4567;
      vacc5x0123 = vacc5x4567;
      nr -= 4;
    }
    if (nr >= 2) {
      vst1_f32(c0, vget_low_f32(vacc0x0123));
      c0 += 2;
      vst1_f32(c1, vget_low_f32(vacc1x0123));
      c1 += 2;
      vst1_f32(c2, vget_low_f32(vacc2x0123));
      c2 += 2;
      vst1_f32(c3, vget_low_f32(vacc3x0123));
      c3 += 2;
      vst1_f32(c4, vget_low_f32(vacc4x0123));
      c4 += 2;
      vst1_f32(c5, vget_low_f32(vacc5x0123));
      c5 += 2;
      /* Rotate the upper half of each accumulator into the lower lanes. */
      vacc0x0123 = vextq_f32(vacc0x0123, vacc0x0123, 2);
      vacc1x0123 = vextq_f32(vacc1x0123, vacc1x0123, 2);
      vacc2x0123 = vextq_f32(vacc2x0123, vacc2x0123, 2);
      vacc3x0123 = vextq_f32(vacc3x0123, vacc3x0123, 2);
      vacc4x0123 = vextq_f32(vacc4x0123, vacc4x0123, 2);
      vacc5x0123 = vextq_f32(vacc5x0123, vacc5x0123, 2);
      nr -= 2;
    }
    if (nr != 0) {
      vst1q_lane_f32(c0, vacc0x0123, 0);
      vst1q_lane_f32(c1, vacc1x0123, 0);
      vst1q_lane_f32(c2, vacc2x0123, 0);
      vst1q_lane_f32(c3, vacc3x0123, 0);
      vst1q_lane_f32(c4, vacc4x0123, 0);
      vst1q_lane_f32(c5, vacc5x0123, 0);
    }
  }
}
| 10,669
| 33.642857
| 72
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/sgemm/6x8-psimd.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <psimd.h>
#include <qnnpack/sgemm.h>
/*
 * Portable-SIMD (psimd) SGEMM micro-kernel: computes a 6x8 (mr x nr) output
 * tile of C from up to six rows of A and a sequentially-read packed buffer
 * w, then clamps every result to [clamping_params->min, clamping_params->max].
 *
 * mr (<= 6) and nr (<= 8) are the valid tile dimensions; k is the reduction
 * depth (must be nonzero: the main loop is do/while). a and c are row-major
 * with byte strides a_stride / c_stride.
 */
void pytorch_sgemm_ukernel_6x8__psimd(
    size_t mr,
    size_t nr,
    size_t k,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t c_stride,
    const struct pytorch_qnnp_fp32_clamping_params
        clamping_params[restrict static 1]) {
  /* Accumulators start from the first 8 floats of w (presumably the bias
   * packed ahead of the weights -- verify against the weight packer). All
   * six rows share the same initial values. */
  psimd_f32 vacc0x0123 = psimd_load_f32(w);
  w += 4;
  psimd_f32 vacc0x4567 = psimd_load_f32(w);
  w += 4;
  psimd_f32 vacc1x0123 = vacc0x0123;
  psimd_f32 vacc1x4567 = vacc0x4567;
  psimd_f32 vacc2x0123 = vacc0x0123;
  psimd_f32 vacc2x4567 = vacc0x4567;
  psimd_f32 vacc3x0123 = vacc0x0123;
  psimd_f32 vacc3x4567 = vacc0x4567;
  psimd_f32 vacc4x0123 = vacc0x0123;
  psimd_f32 vacc4x4567 = vacc0x4567;
  psimd_f32 vacc5x0123 = vacc0x0123;
  psimd_f32 vacc5x4567 = vacc0x4567;
  /* Row pointers for A: rows beyond mr alias the last valid row so every
   * load stays in bounds; their results are computed but never stored. */
  const float* a0 = a;
  const float* a1 = (const float*)((uintptr_t)a0 + a_stride);
  if (mr < 2) {
    a1 = a0;
  }
  const float* a2 = (const float*)((uintptr_t)a1 + a_stride);
  if (mr <= 2) {
    a2 = a1;
  }
  const float* a3 = (const float*)((uintptr_t)a2 + a_stride);
  if (mr < 4) {
    a3 = a2;
  }
  const float* a4 = (const float*)((uintptr_t)a3 + a_stride);
  if (mr <= 4) {
    a4 = a3;
  }
  const float* a5 = (const float*)((uintptr_t)a4 + a_stride);
  if (mr != 6) {
    a5 = a4;
  }
  /* Main loop: one depth step per iteration -- splat one A element per row
   * and multiply-accumulate against the next 8 packed weights. */
  do {
    const psimd_f32 va0 = psimd_splat_f32(*a0);
    a0 += 1;
    const psimd_f32 va1 = psimd_splat_f32(*a1);
    a1 += 1;
    const psimd_f32 va2 = psimd_splat_f32(*a2);
    a2 += 1;
    const psimd_f32 va3 = psimd_splat_f32(*a3);
    a3 += 1;
    const psimd_f32 va4 = psimd_splat_f32(*a4);
    a4 += 1;
    const psimd_f32 va5 = psimd_splat_f32(*a5);
    a5 += 1;
    const psimd_f32 vb0123 = psimd_load_f32(w);
    w += 4;
    const psimd_f32 vb4567 = psimd_load_f32(w);
    w += 4;
    vacc0x0123 += vb0123 * va0;
    vacc0x4567 += vb4567 * va0;
    vacc1x0123 += vb0123 * va1;
    vacc1x4567 += vb4567 * va1;
    vacc2x0123 += vb0123 * va2;
    vacc2x4567 += vb4567 * va2;
    vacc3x0123 += vb0123 * va3;
    vacc3x4567 += vb4567 * va3;
    vacc4x0123 += vb0123 * va4;
    vacc4x4567 += vb4567 * va4;
    vacc5x0123 += vb0123 * va5;
    vacc5x4567 += vb4567 * va5;
  } while (--k != 0);
  /* Clamp all accumulators to the configured output range. */
  const psimd_f32 vmax = psimd_splat_f32(clamping_params->max);
  vacc0x0123 = psimd_min_f32(vacc0x0123, vmax);
  vacc0x4567 = psimd_min_f32(vacc0x4567, vmax);
  vacc1x0123 = psimd_min_f32(vacc1x0123, vmax);
  vacc1x4567 = psimd_min_f32(vacc1x4567, vmax);
  vacc2x0123 = psimd_min_f32(vacc2x0123, vmax);
  vacc2x4567 = psimd_min_f32(vacc2x4567, vmax);
  vacc3x0123 = psimd_min_f32(vacc3x0123, vmax);
  vacc3x4567 = psimd_min_f32(vacc3x4567, vmax);
  vacc4x0123 = psimd_min_f32(vacc4x0123, vmax);
  vacc4x4567 = psimd_min_f32(vacc4x4567, vmax);
  vacc5x0123 = psimd_min_f32(vacc5x0123, vmax);
  vacc5x4567 = psimd_min_f32(vacc5x4567, vmax);
  const psimd_f32 vmin = psimd_splat_f32(clamping_params->min);
  vacc0x0123 = psimd_max_f32(vacc0x0123, vmin);
  vacc0x4567 = psimd_max_f32(vacc0x4567, vmin);
  vacc1x0123 = psimd_max_f32(vacc1x0123, vmin);
  vacc1x4567 = psimd_max_f32(vacc1x4567, vmin);
  vacc2x0123 = psimd_max_f32(vacc2x0123, vmin);
  vacc2x4567 = psimd_max_f32(vacc2x4567, vmin);
  vacc3x0123 = psimd_max_f32(vacc3x0123, vmin);
  vacc3x4567 = psimd_max_f32(vacc3x4567, vmin);
  vacc4x0123 = psimd_max_f32(vacc4x0123, vmin);
  vacc4x4567 = psimd_max_f32(vacc4x4567, vmin);
  vacc5x0123 = psimd_max_f32(vacc5x0123, vmin);
  vacc5x4567 = psimd_max_f32(vacc5x4567, vmin);
  /* Output row pointers, clamped the same way as the A row pointers so
   * out-of-range rows write on top of a valid row (harmlessly duplicated). */
  float* c0 = c;
  float* c1 = (float*)((uintptr_t)c0 + c_stride);
  if (mr < 2) {
    c1 = c0;
  }
  float* c2 = (float*)((uintptr_t)c1 + c_stride);
  if (mr <= 2) {
    c2 = c1;
  }
  float* c3 = (float*)((uintptr_t)c2 + c_stride);
  if (mr < 4) {
    c3 = c2;
  }
  float* c4 = (float*)((uintptr_t)c3 + c_stride);
  if (mr <= 4) {
    c4 = c3;
  }
  float* c5 = (float*)((uintptr_t)c4 + c_stride);
  if (mr != 6) {
    c5 = c4;
  }
  if (nr == 8) {
    /* Full-width tile: two 4-wide stores per row. */
    psimd_store_f32(c0, vacc0x0123);
    c0 += 4;
    psimd_store_f32(c1, vacc1x0123);
    c1 += 4;
    psimd_store_f32(c2, vacc2x0123);
    c2 += 4;
    psimd_store_f32(c3, vacc3x0123);
    c3 += 4;
    psimd_store_f32(c4, vacc4x0123);
    c4 += 4;
    psimd_store_f32(c5, vacc5x0123);
    c5 += 4;
    psimd_store_f32(c0, vacc0x4567);
    psimd_store_f32(c1, vacc1x4567);
    psimd_store_f32(c2, vacc2x4567);
    psimd_store_f32(c3, vacc3x4567);
    psimd_store_f32(c4, vacc4x4567);
    psimd_store_f32(c5, vacc5x4567);
  } else {
    /* Partial tile: peel off 4-, 2-, then 1-column stores, shifting the
     * remaining lanes down after each step. */
    if (nr >= 4) {
      psimd_store_f32(c0, vacc0x0123);
      c0 += 4;
      psimd_store_f32(c1, vacc1x0123);
      c1 += 4;
      psimd_store_f32(c2, vacc2x0123);
      c2 += 4;
      psimd_store_f32(c3, vacc3x0123);
      c3 += 4;
      psimd_store_f32(c4, vacc4x0123);
      c4 += 4;
      psimd_store_f32(c5, vacc5x0123);
      c5 += 4;
      vacc0x0123 = vacc0x4567;
      vacc1x0123 = vacc1x4567;
      vacc2x0123 = vacc2x4567;
      vacc3x0123 = vacc3x4567;
      vacc4x0123 = vacc4x4567;
      vacc5x0123 = vacc5x4567;
      nr -= 4;
    }
    if (nr >= 2) {
      psimd_store2_f32(c0, vacc0x0123);
      c0 += 2;
      psimd_store2_f32(c1, vacc1x0123);
      c1 += 2;
      psimd_store2_f32(c2, vacc2x0123);
      c2 += 2;
      psimd_store2_f32(c3, vacc3x0123);
      c3 += 2;
      psimd_store2_f32(c4, vacc4x0123);
      c4 += 2;
      psimd_store2_f32(c5, vacc5x0123);
      c5 += 2;
      /* Move the upper half of each accumulator into the lower lanes. */
      vacc0x0123 = psimd_concat_hi_f32(vacc0x0123, vacc0x0123);
      vacc1x0123 = psimd_concat_hi_f32(vacc1x0123, vacc1x0123);
      vacc2x0123 = psimd_concat_hi_f32(vacc2x0123, vacc2x0123);
      vacc3x0123 = psimd_concat_hi_f32(vacc3x0123, vacc3x0123);
      vacc4x0123 = psimd_concat_hi_f32(vacc4x0123, vacc4x0123);
      vacc5x0123 = psimd_concat_hi_f32(vacc5x0123, vacc5x0123);
      nr -= 2;
    }
    if (nr != 0) {
      psimd_store1_f32(c0, vacc0x0123);
      psimd_store1_f32(c1, vacc1x0123);
      psimd_store1_f32(c2, vacc2x0123);
      psimd_store1_f32(c3, vacc3x0123);
      psimd_store1_f32(c4, vacc4x0123);
      psimd_store1_f32(c5, vacc5x0123);
    }
  }
}
| 6,296
| 28.152778
| 72
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/u8clamp/neon.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <arm_neon.h>
#include <qnnpack/u8clamp.h>
/*
 * Clamps n uint8 elements of x into [output_min, output_max] (taken from
 * params->neon) and writes them to y. x and y may be the same buffer but
 * must not partially overlap. n must be nonzero.
 */
void pytorch_u8clamp_ukernel__neon(
    size_t n,
    const uint8_t* x,
    uint8_t* y,
    const union pytorch_qnnp_u8_clamping_params params[restrict static 1]) {
  assert(n != 0);
  const uint8x16_t voutput_max = vld1q_dup_u8(&params->neon.output_max);
  const uint8x16_t voutput_min = vld1q_dup_u8(&params->neon.output_min);
  if
    PYTORCH_QNNP_LIKELY(n >= 8) {
      /* Bulk path: 64 elements per iteration (4 x 16-byte vectors). */
      for (; n >= 64; n -= 64) {
        const uint8x16_t vx0 = vld1q_u8(x);
        x += 16;
        const uint8x16_t vx1 = vld1q_u8(x);
        x += 16;
        const uint8x16_t vx2 = vld1q_u8(x);
        x += 16;
        const uint8x16_t vx3 = vld1q_u8(x);
        x += 16;
        const uint8x16_t vy0 =
            vminq_u8(vmaxq_u8(vx0, voutput_min), voutput_max);
        const uint8x16_t vy1 =
            vminq_u8(vmaxq_u8(vx1, voutput_min), voutput_max);
        const uint8x16_t vy2 =
            vminq_u8(vmaxq_u8(vx2, voutput_min), voutput_max);
        const uint8x16_t vy3 =
            vminq_u8(vmaxq_u8(vx3, voutput_min), voutput_max);
        __builtin_prefetch(x + 640);
        vst1q_u8(y, vy0);
        y += 16;
        vst1q_u8(y, vy1);
        y += 16;
        vst1q_u8(y, vy2);
        y += 16;
        vst1q_u8(y, vy3);
        y += 16;
      }
      /* 8-element vectors for the remaining 8..63 elements. */
      for (; n >= 8; n -= 8) {
        uint8x8_t vout = vld1_u8(x);
        x += 8;
        vout = vmin_u8(vout, vget_low_u8(voutput_max));
        vout = vmax_u8(vout, vget_low_u8(voutput_min));
        vst1_u8(y, vout);
        y += 8;
      }
      /* Tail of 1..7 elements: back both pointers up (n - 8 is negative)
       * so the final 8-byte vector overlaps already-clamped output --
       * re-clamping those bytes is idempotent, and no out-of-bounds access
       * occurs because at least 8 elements were processed above. */
      if (n != 0) {
        const size_t n_increment = n - 8;
        x = (const uint8_t*)((uintptr_t)x + n_increment);
        y = (uint8_t*)((uintptr_t)y + n_increment);
        uint8x8_t vout = vld1_u8(x);
        vout = vmin_u8(vout, vget_low_u8(voutput_max));
        vout = vmax_u8(vout, vget_low_u8(voutput_min));
        vst1_u8(y, vout);
      }
    }
  else {
    /* Fewer than 8 elements: process one at a time with scalar lane ops. */
    do {
      uint8x8_t vout = vld1_dup_u8(x);
      x += 1;
      vout = vmin_u8(vout, vget_low_u8(voutput_max));
      vout = vmax_u8(vout, vget_low_u8(voutput_min));
      vst1_lane_u8(y, vout, 0);
      y += 1;
    } while (--n != 0);
  }
}
| 2,399
| 26.586207
| 76
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/u8clamp/sse2.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <emmintrin.h>
#include <qnnpack/u8clamp.h>
/*
 * Clamps n uint8 elements of x into [output_min, output_max] (taken from
 * params->sse2) and writes them to y. x and y may be the same buffer but
 * must not partially overlap. n must be nonzero.
 */
void pytorch_u8clamp_ukernel__sse2(
    size_t n,
    const uint8_t* x,
    uint8_t* y,
    const union pytorch_qnnp_u8_clamping_params params[RESTRICT_STATIC 1]) {
  assert(n != 0);
  if
    PYTORCH_QNNP_LIKELY(n >= 8) {
      const __m128i voutput_max =
          _mm_load_si128((const __m128i*)&params->sse2.output_max);
      const __m128i voutput_min =
          _mm_load_si128((const __m128i*)&params->sse2.output_min);
      /* Bulk path: 64 elements per iteration (4 x 16-byte vectors). */
      for (; n >= 64; n -= 64) {
        const __m128i vx0 = _mm_loadu_si128((const __m128i*)x);
        const __m128i vx1 = _mm_loadu_si128((const __m128i*)x + 1);
        const __m128i vx2 = _mm_loadu_si128((const __m128i*)x + 2);
        const __m128i vx3 = _mm_loadu_si128((const __m128i*)x + 3);
        x += 64;
        const __m128i vy0 =
            _mm_min_epu8(_mm_max_epu8(vx0, voutput_min), voutput_max);
        const __m128i vy1 =
            _mm_min_epu8(_mm_max_epu8(vx1, voutput_min), voutput_max);
        const __m128i vy2 =
            _mm_min_epu8(_mm_max_epu8(vx2, voutput_min), voutput_max);
        const __m128i vy3 =
            _mm_min_epu8(_mm_max_epu8(vx3, voutput_min), voutput_max);
        __builtin_prefetch(x + 640);
        _mm_storeu_si128((__m128i*)y, vy0);
        _mm_storeu_si128((__m128i*)y + 1, vy1);
        _mm_storeu_si128((__m128i*)y + 2, vy2);
        _mm_storeu_si128((__m128i*)y + 3, vy3);
        y += 64;
      }
      /* 8-element vectors for the remaining 8..63 elements. */
      for (; n >= 8; n -= 8) {
        __m128i vout = _mm_loadl_epi64((const __m128i*)x);
        x += 8;
        vout = _mm_min_epu8(vout, voutput_max);
        vout = _mm_max_epu8(vout, voutput_min);
        _mm_storel_epi64((__m128i*)y, vout);
        y += 8;
      }
      /* Tail of 1..7 elements: back both pointers up (n - 8 is negative)
       * so the final 8-byte vector overlaps already-clamped output --
       * re-clamping those bytes is idempotent, and no out-of-bounds access
       * occurs because at least 8 elements were processed above. */
      if (n != 0) {
        const size_t n_increment = n - 8;
        x = (const uint8_t*)((uintptr_t)x + n_increment);
        y = (uint8_t*)((uintptr_t)y + n_increment);
        __m128i vout = _mm_loadl_epi64((const __m128i*)x);
        vout = _mm_min_epu8(vout, voutput_max);
        vout = _mm_max_epu8(vout, voutput_min);
        _mm_storel_epi64((__m128i*)y, vout);
      }
    }
  else {
    /* Fewer than 8 elements: plain scalar clamp, one element at a time. */
    const uint32_t voutput_max = params->sse2.output_max[0];
    const uint32_t voutput_min = params->sse2.output_min[0];
    do {
      uint32_t vout = *x++;
      vout = vout > voutput_max ? voutput_max : vout;
      vout = vout < voutput_min ? voutput_min : vout;
      *y++ = (uint8_t)vout;
    } while (--n != 0);
  }
}
| 2,680
| 31.695122
| 76
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/u8lut32norm/scalar.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <fxdiv.h>
#include <qnnpack/u8lut32norm.h>
/*
 * Sums the lookup-table values t[x[i]] over the n input bytes.
 * n must be nonzero; the result may wrap modulo 2^32.
 */
static inline uint32_t compute_sum(
    size_t n,
    const uint8_t* x,
    const uint32_t* t) {
  assert(n != 0);
  uint32_t total = 0;
  for (size_t i = 0; i < n; i++) {
    total += t[x[i]];
  }
  return total;
}
void pytorch_u8lut32norm_ukernel__scalar(
size_t n,
const uint8_t* x,
const uint32_t* t,
uint8_t* y) {
assert(n != 0);
const uint32_t vsum = compute_sum(n, x, t);
assert(vsum != 0);
struct fxdiv_divisor_uint32_t vsum_divisor = fxdiv_init_uint32_t(vsum);
const uint32_t vrounding = (vsum >> 1);
do {
const size_t vx = *x++;
const uint32_t vt = t[vx];
const uint32_t vq =
fxdiv_quotient_uint32_t((vt << 8) + vrounding, vsum_divisor);
const uint8_t vy = vq > 255 ? UINT8_C(255) : (uint8_t)vq;
*y++ = vy;
} while (--n != 0);
}
| 1,108
| 21.18
| 73
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/u8maxpool/16x9p8q-neon.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <arm_neon.h>
#include <qnnpack/u8maxpool.h>
void pytorch_u8maxpool_ukernel_16x9p8q__neon(
size_t n,
size_t ks,
size_t kc,
const uint8_t** input,
uint8_t* output,
size_t input_increment,
size_t output_increment,
const union pytorch_qnnp_u8_clamping_params params[restrict static 1]) {
assert(n != 0);
assert(ks != 0);
assert(kc >= 16);
const uint8x16_t voutput_max = vld1q_dup_u8(¶ms->neon.output_max);
const uint8x16_t voutput_min = vld1q_dup_u8(¶ms->neon.output_min);
do {
uint8_t* o = output;
{
const uint8_t* i0 = *input++;
const uint8_t* i1 = *input++;
const uint8_t* i2 = *input++;
const uint8_t* i3 = *input++;
const uint8_t* i4 = *input++;
const uint8_t* i5 = *input++;
const uint8_t* i6 = *input++;
const uint8_t* i7 = *input++;
const uint8_t* i8 = *input++;
if (ks < 2) {
i1 = i0;
}
if (ks <= 2) {
i2 = i0;
}
if (ks < 4) {
i3 = i0;
}
if (ks <= 4) {
i4 = i0;
}
if (ks < 6) {
i5 = i0;
}
if (ks <= 6) {
i6 = i0;
}
if (ks < 8) {
i7 = i0;
}
if (ks <= 8) {
i8 = i0;
}
size_t k = kc;
while (k >= 16) {
const uint8x16_t vi0 = vld1q_u8(i0);
i0 += 16;
const uint8x16_t vi1 = vld1q_u8(i1);
i1 += 16;
const uint8x16_t vi2 = vld1q_u8(i2);
i2 += 16;
const uint8x16_t vi3 = vld1q_u8(i3);
i3 += 16;
const uint8x16_t vi4 = vld1q_u8(i4);
i4 += 16;
const uint8x16_t vi5 = vld1q_u8(i5);
i5 += 16;
const uint8x16_t vi6 = vld1q_u8(i6);
i6 += 16;
const uint8x16_t vi7 = vld1q_u8(i7);
i7 += 16;
const uint8x16_t vi8 = vld1q_u8(i8);
i8 += 16;
const uint8x16_t vmax018 = vmaxq_u8(vmaxq_u8(vi0, vi1), vi8);
const uint8x16_t vmax23 = vmaxq_u8(vi2, vi3);
const uint8x16_t vmax45 = vmaxq_u8(vi4, vi5);
const uint8x16_t vmax67 = vmaxq_u8(vi6, vi7);
const uint8x16_t vmax2345 = vmaxq_u8(vmax23, vmax45);
const uint8x16_t vmax01678 = vmaxq_u8(vmax018, vmax67);
const uint8x16_t vmax = vmaxq_u8(vmax2345, vmax01678);
const uint8x16_t vout =
vmaxq_u8(vminq_u8(vmax, voutput_max), voutput_min);
vst1q_u8(o, vout);
o += 16;
k -= 16;
}
if (k != 0) {
const size_t address_increment = k - 16;
i0 = (const uint8_t*)((uintptr_t)i0 + address_increment);
i1 = (const uint8_t*)((uintptr_t)i1 + address_increment);
i2 = (const uint8_t*)((uintptr_t)i2 + address_increment);
i3 = (const uint8_t*)((uintptr_t)i3 + address_increment);
i4 = (const uint8_t*)((uintptr_t)i4 + address_increment);
i5 = (const uint8_t*)((uintptr_t)i5 + address_increment);
i6 = (const uint8_t*)((uintptr_t)i6 + address_increment);
i7 = (const uint8_t*)((uintptr_t)i7 + address_increment);
i8 = (const uint8_t*)((uintptr_t)i8 + address_increment);
o = (uint8_t*)((uintptr_t)o + address_increment);
const uint8x16_t vi0 = vld1q_u8(i0);
const uint8x16_t vi1 = vld1q_u8(i1);
const uint8x16_t vi2 = vld1q_u8(i2);
const uint8x16_t vi3 = vld1q_u8(i3);
const uint8x16_t vi4 = vld1q_u8(i4);
const uint8x16_t vi5 = vld1q_u8(i5);
const uint8x16_t vi6 = vld1q_u8(i6);
const uint8x16_t vi7 = vld1q_u8(i7);
const uint8x16_t vi8 = vld1q_u8(i8);
const uint8x16_t vmax018 = vmaxq_u8(vmaxq_u8(vi0, vi1), vi8);
const uint8x16_t vmax23 = vmaxq_u8(vi2, vi3);
const uint8x16_t vmax45 = vmaxq_u8(vi4, vi5);
const uint8x16_t vmax67 = vmaxq_u8(vi6, vi7);
const uint8x16_t vmax2345 = vmaxq_u8(vmax23, vmax45);
const uint8x16_t vmax01678 = vmaxq_u8(vmax018, vmax67);
const uint8x16_t vmax = vmaxq_u8(vmax2345, vmax01678);
const uint8x16_t vout =
vmaxq_u8(vminq_u8(vmax, voutput_max), voutput_min);
vst1q_u8(o, vout);
o += 16;
}
}
for (ptrdiff_t m = (ptrdiff_t)ks - 9; m > 0; m -= 8) {
const uint8_t* i0 = *input++;
const uint8_t* i1 = *input++;
const uint8_t* i2 = *input++;
const uint8_t* i3 = *input++;
const uint8_t* i4 = *input++;
const uint8_t* i5 = *input++;
const uint8_t* i6 = *input++;
const uint8_t* i7 = *input++;
if (m < 2) {
i1 = i0;
}
if (m <= 2) {
i2 = i0;
}
if (m < 4) {
i3 = i0;
}
if (m <= 4) {
i4 = i0;
}
if (m < 6) {
i5 = i0;
}
if (m <= 6) {
i6 = i0;
}
if (m < 8) {
i7 = i0;
}
o = output;
size_t k = kc;
while (k >= 16) {
const uint8x16_t vi0 = vld1q_u8(i0);
i0 += 16;
const uint8x16_t vi1 = vld1q_u8(i1);
i1 += 16;
const uint8x16_t vi2 = vld1q_u8(i2);
i2 += 16;
const uint8x16_t vi3 = vld1q_u8(i3);
i3 += 16;
const uint8x16_t vi4 = vld1q_u8(i4);
i4 += 16;
const uint8x16_t vi5 = vld1q_u8(i5);
i5 += 16;
const uint8x16_t vi6 = vld1q_u8(i6);
i6 += 16;
const uint8x16_t vi7 = vld1q_u8(i7);
i7 += 16;
const uint8x16_t vo = vld1q_u8(o);
const uint8x16_t vmax01 = vmaxq_u8(vmaxq_u8(vi0, vi1), vo);
const uint8x16_t vmax23 = vmaxq_u8(vi2, vi3);
const uint8x16_t vmax45 = vmaxq_u8(vi4, vi5);
const uint8x16_t vmax67 = vmaxq_u8(vi6, vi7);
const uint8x16_t vmax2345 = vmaxq_u8(vmax23, vmax45);
const uint8x16_t vmax0167 = vmaxq_u8(vmax01, vmax67);
const uint8x16_t vmax = vmaxq_u8(vmax2345, vmax0167);
const uint8x16_t vout =
vmaxq_u8(vminq_u8(vmax, voutput_max), voutput_min);
vst1q_u8(o, vout);
o += 16;
k -= 16;
}
if (k != 0) {
const size_t address_increment = k - 16;
i0 = (const uint8_t*)((uintptr_t)i0 + address_increment);
i1 = (const uint8_t*)((uintptr_t)i1 + address_increment);
i2 = (const uint8_t*)((uintptr_t)i2 + address_increment);
i3 = (const uint8_t*)((uintptr_t)i3 + address_increment);
i4 = (const uint8_t*)((uintptr_t)i4 + address_increment);
i5 = (const uint8_t*)((uintptr_t)i5 + address_increment);
i6 = (const uint8_t*)((uintptr_t)i6 + address_increment);
i7 = (const uint8_t*)((uintptr_t)i7 + address_increment);
o = (uint8_t*)((uintptr_t)o + address_increment);
const uint8x16_t vi0 = vld1q_u8(i0);
const uint8x16_t vi1 = vld1q_u8(i1);
const uint8x16_t vi2 = vld1q_u8(i2);
const uint8x16_t vi3 = vld1q_u8(i3);
const uint8x16_t vi4 = vld1q_u8(i4);
const uint8x16_t vi5 = vld1q_u8(i5);
const uint8x16_t vi6 = vld1q_u8(i6);
const uint8x16_t vi7 = vld1q_u8(i7);
const uint8x16_t vo = vld1q_u8(o);
const uint8x16_t vmax01 = vmaxq_u8(vmaxq_u8(vi0, vi1), vo);
const uint8x16_t vmax23 = vmaxq_u8(vi2, vi3);
const uint8x16_t vmax45 = vmaxq_u8(vi4, vi5);
const uint8x16_t vmax67 = vmaxq_u8(vi6, vi7);
const uint8x16_t vmax2345 = vmaxq_u8(vmax23, vmax45);
const uint8x16_t vmax0167 = vmaxq_u8(vmax01, vmax67);
const uint8x16_t vmax = vmaxq_u8(vmax2345, vmax0167);
const uint8x16_t vout =
vmaxq_u8(vminq_u8(vmax, voutput_max), voutput_min);
vst1q_u8(o, vout);
o += 16;
}
}
input = (const uint8_t**)((uintptr_t)input + input_increment);
output = (uint8_t*)((uintptr_t)o + output_increment);
} while (--n != 0);
}
| 8,041
| 30.912698
| 76
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/u8maxpool/16x9p8q-sse2.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <emmintrin.h>
#include <qnnpack/u8maxpool.h>
/*
 * SSE2 uint8 max-pooling micro-kernel for channel counts kc >= 16.
 * Each output pixel is the lane-wise maximum over a window of `ks` input
 * rows, clamped to [output_min, output_max]. The window is consumed as a
 * first pass of up to 9 taps that writes the output, followed by passes of
 * up to 8 taps that read the partially-reduced output back and fold more
 * taps into it ("9p8q").
 *
 * n                - number of output pixels to produce (must be non-zero).
 * ks               - pooling window size, in taps (must be non-zero).
 * kc               - number of channels (uint8 lanes) per pixel; must be >= 16.
 * input            - array of input-row pointers; advanced by input_increment
 *                    bytes after each output pixel. Presumably an indirection
 *                    buffer prepared by the caller — TODO confirm layout.
 * output           - destination; advanced by output_increment bytes past the
 *                    last written byte of each pixel.
 * params           - pre-packed clamping bounds (16 copies of min/max each).
 */
void pytorch_u8maxpool_ukernel_16x9p8q__sse2(
    size_t n,
    size_t ks,
    size_t kc,
    const uint8_t** input,
    uint8_t* output,
    size_t input_increment,
    size_t output_increment,
    const union pytorch_qnnp_u8_clamping_params params[RESTRICT_STATIC 1]) {
  assert(n != 0);
  assert(ks != 0);
  assert(kc >= 16);

  /* Broadcast clamp bounds across all 16 uint8 lanes. */
  const __m128i voutput_max =
      _mm_load_si128((const __m128i*)params->sse2.output_max);
  const __m128i voutput_min =
      _mm_load_si128((const __m128i*)params->sse2.output_min);

  do {
    uint8_t* o = output;
    {
      /* First pass: reduce the first min(ks, 9) taps and write the result. */
      const uint8_t* i0 = *input++;
      const uint8_t* i1 = *input++;
      const uint8_t* i2 = *input++;
      const uint8_t* i3 = *input++;
      const uint8_t* i4 = *input++;
      const uint8_t* i5 = *input++;
      const uint8_t* i6 = *input++;
      const uint8_t* i7 = *input++;
      const uint8_t* i8 = *input++;
      /* For windows shorter than 9 taps, alias the out-of-range pointers to
         i0: max(a, a) == a, so duplicates do not change the result. */
      if (ks < 2) {
        i1 = i0;
      }
      if (ks <= 2) {
        i2 = i0;
      }
      if (ks < 4) {
        i3 = i0;
      }
      if (ks <= 4) {
        i4 = i0;
      }
      if (ks < 6) {
        i5 = i0;
      }
      if (ks <= 6) {
        i6 = i0;
      }
      if (ks < 8) {
        i7 = i0;
      }
      if (ks <= 8) {
        i8 = i0;
      }

      /* Main channel loop: 16 lanes per iteration. */
      size_t k = kc;
      while (k >= 16) {
        const __m128i vi0 = _mm_loadu_si128((const __m128i*)i0);
        i0 += 16;
        const __m128i vi1 = _mm_loadu_si128((const __m128i*)i1);
        i1 += 16;
        const __m128i vi2 = _mm_loadu_si128((const __m128i*)i2);
        i2 += 16;
        const __m128i vi3 = _mm_loadu_si128((const __m128i*)i3);
        i3 += 16;
        const __m128i vi4 = _mm_loadu_si128((const __m128i*)i4);
        i4 += 16;
        const __m128i vi5 = _mm_loadu_si128((const __m128i*)i5);
        i5 += 16;
        const __m128i vi6 = _mm_loadu_si128((const __m128i*)i6);
        i6 += 16;
        const __m128i vi7 = _mm_loadu_si128((const __m128i*)i7);
        i7 += 16;
        const __m128i vi8 = _mm_loadu_si128((const __m128i*)i8);
        i8 += 16;

        /* Balanced max-reduction tree over the 9 taps. */
        const __m128i vmax018 = _mm_max_epu8(_mm_max_epu8(vi0, vi1), vi8);
        const __m128i vmax23 = _mm_max_epu8(vi2, vi3);
        const __m128i vmax45 = _mm_max_epu8(vi4, vi5);
        const __m128i vmax67 = _mm_max_epu8(vi6, vi7);

        const __m128i vmax2345 = _mm_max_epu8(vmax23, vmax45);
        const __m128i vmax01678 = _mm_max_epu8(vmax018, vmax67);
        const __m128i vmax = _mm_max_epu8(vmax2345, vmax01678);
        /* Clamp to the quantized output range. */
        const __m128i vout =
            _mm_max_epu8(_mm_min_epu8(vmax, voutput_max), voutput_min);

        _mm_storeu_si128((__m128i*)o, vout);
        o += 16;

        k -= 16;
      }
      if (k != 0) {
        /* 1-15 leftover channels: step all pointers back by (16 - k) and
           redo a full overlapping 16-lane iteration. The overlapped lanes
           are recomputed to the same values, so rewriting them is safe. */
        const size_t address_increment = k - 16;
        i0 = (const uint8_t*)((uintptr_t)i0 + address_increment);
        i1 = (const uint8_t*)((uintptr_t)i1 + address_increment);
        i2 = (const uint8_t*)((uintptr_t)i2 + address_increment);
        i3 = (const uint8_t*)((uintptr_t)i3 + address_increment);
        i4 = (const uint8_t*)((uintptr_t)i4 + address_increment);
        i5 = (const uint8_t*)((uintptr_t)i5 + address_increment);
        i6 = (const uint8_t*)((uintptr_t)i6 + address_increment);
        i7 = (const uint8_t*)((uintptr_t)i7 + address_increment);
        i8 = (const uint8_t*)((uintptr_t)i8 + address_increment);
        o = (uint8_t*)((uintptr_t)o + address_increment);

        const __m128i vi0 = _mm_loadu_si128((const __m128i*)i0);
        const __m128i vi1 = _mm_loadu_si128((const __m128i*)i1);
        const __m128i vi2 = _mm_loadu_si128((const __m128i*)i2);
        const __m128i vi3 = _mm_loadu_si128((const __m128i*)i3);
        const __m128i vi4 = _mm_loadu_si128((const __m128i*)i4);
        const __m128i vi5 = _mm_loadu_si128((const __m128i*)i5);
        const __m128i vi6 = _mm_loadu_si128((const __m128i*)i6);
        const __m128i vi7 = _mm_loadu_si128((const __m128i*)i7);
        const __m128i vi8 = _mm_loadu_si128((const __m128i*)i8);

        const __m128i vmax018 = _mm_max_epu8(_mm_max_epu8(vi0, vi1), vi8);
        const __m128i vmax23 = _mm_max_epu8(vi2, vi3);
        const __m128i vmax45 = _mm_max_epu8(vi4, vi5);
        const __m128i vmax67 = _mm_max_epu8(vi6, vi7);

        const __m128i vmax2345 = _mm_max_epu8(vmax23, vmax45);
        const __m128i vmax01678 = _mm_max_epu8(vmax018, vmax67);
        const __m128i vmax = _mm_max_epu8(vmax2345, vmax01678);
        const __m128i vout =
            _mm_max_epu8(_mm_min_epu8(vmax, voutput_max), voutput_min);

        _mm_storeu_si128((__m128i*)o, vout);
        o += 16;
      }
    }

    /* Remaining passes: fold up to 8 more taps per pass into the output
       written above. m counts taps still unprocessed after the first 9. */
    for (ptrdiff_t m = (ptrdiff_t)ks - 9; m > 0; m -= 8) {
      const uint8_t* i0 = *input++;
      const uint8_t* i1 = *input++;
      const uint8_t* i2 = *input++;
      const uint8_t* i3 = *input++;
      const uint8_t* i4 = *input++;
      const uint8_t* i5 = *input++;
      const uint8_t* i6 = *input++;
      const uint8_t* i7 = *input++;
      /* Alias out-of-range taps to i0, as in the first pass. */
      if (m < 2) {
        i1 = i0;
      }
      if (m <= 2) {
        i2 = i0;
      }
      if (m < 4) {
        i3 = i0;
      }
      if (m <= 4) {
        i4 = i0;
      }
      if (m < 6) {
        i5 = i0;
      }
      if (m <= 6) {
        i6 = i0;
      }
      if (m < 8) {
        i7 = i0;
      }

      /* Rewind to the start of this pixel's output: it is both an input
         (running maximum so far) and the destination. */
      o = output;
      size_t k = kc;
      while (k >= 16) {
        const __m128i vi0 = _mm_loadu_si128((const __m128i*)i0);
        i0 += 16;
        const __m128i vi1 = _mm_loadu_si128((const __m128i*)i1);
        i1 += 16;
        const __m128i vi2 = _mm_loadu_si128((const __m128i*)i2);
        i2 += 16;
        const __m128i vi3 = _mm_loadu_si128((const __m128i*)i3);
        i3 += 16;
        const __m128i vi4 = _mm_loadu_si128((const __m128i*)i4);
        i4 += 16;
        const __m128i vi5 = _mm_loadu_si128((const __m128i*)i5);
        i5 += 16;
        const __m128i vi6 = _mm_loadu_si128((const __m128i*)i6);
        i6 += 16;
        const __m128i vi7 = _mm_loadu_si128((const __m128i*)i7);
        i7 += 16;
        /* Previously-written (already clamped) partial maximum. */
        const __m128i vo = _mm_loadu_si128((const __m128i*)o);

        const __m128i vmax01 = _mm_max_epu8(_mm_max_epu8(vi0, vi1), vo);
        const __m128i vmax23 = _mm_max_epu8(vi2, vi3);
        const __m128i vmax45 = _mm_max_epu8(vi4, vi5);
        const __m128i vmax67 = _mm_max_epu8(vi6, vi7);

        const __m128i vmax2345 = _mm_max_epu8(vmax23, vmax45);
        const __m128i vmax0167 = _mm_max_epu8(vmax01, vmax67);
        const __m128i vmax = _mm_max_epu8(vmax2345, vmax0167);
        /* Re-clamping is idempotent for the already-clamped vo lanes. */
        const __m128i vout =
            _mm_max_epu8(_mm_min_epu8(vmax, voutput_max), voutput_min);

        _mm_storeu_si128((__m128i*)o, vout);
        o += 16;

        k -= 16;
      }
      if (k != 0) {
        /* Overlapping tail iteration, as in the first pass. */
        const size_t address_increment = k - 16;
        i0 = (const uint8_t*)((uintptr_t)i0 + address_increment);
        i1 = (const uint8_t*)((uintptr_t)i1 + address_increment);
        i2 = (const uint8_t*)((uintptr_t)i2 + address_increment);
        i3 = (const uint8_t*)((uintptr_t)i3 + address_increment);
        i4 = (const uint8_t*)((uintptr_t)i4 + address_increment);
        i5 = (const uint8_t*)((uintptr_t)i5 + address_increment);
        i6 = (const uint8_t*)((uintptr_t)i6 + address_increment);
        i7 = (const uint8_t*)((uintptr_t)i7 + address_increment);
        o = (uint8_t*)((uintptr_t)o + address_increment);

        const __m128i vi0 = _mm_loadu_si128((const __m128i*)i0);
        const __m128i vi1 = _mm_loadu_si128((const __m128i*)i1);
        const __m128i vi2 = _mm_loadu_si128((const __m128i*)i2);
        const __m128i vi3 = _mm_loadu_si128((const __m128i*)i3);
        const __m128i vi4 = _mm_loadu_si128((const __m128i*)i4);
        const __m128i vi5 = _mm_loadu_si128((const __m128i*)i5);
        const __m128i vi6 = _mm_loadu_si128((const __m128i*)i6);
        const __m128i vi7 = _mm_loadu_si128((const __m128i*)i7);
        const __m128i vo = _mm_loadu_si128((const __m128i*)o);

        const __m128i vmax01 = _mm_max_epu8(_mm_max_epu8(vi0, vi1), vo);
        const __m128i vmax23 = _mm_max_epu8(vi2, vi3);
        const __m128i vmax45 = _mm_max_epu8(vi4, vi5);
        const __m128i vmax67 = _mm_max_epu8(vi6, vi7);

        const __m128i vmax2345 = _mm_max_epu8(vmax23, vmax45);
        const __m128i vmax0167 = _mm_max_epu8(vmax01, vmax67);
        const __m128i vmax = _mm_max_epu8(vmax2345, vmax0167);
        const __m128i vout =
            _mm_max_epu8(_mm_min_epu8(vmax, voutput_max), voutput_min);

        _mm_storeu_si128((__m128i*)o, vout);
        o += 16;
      }
    }
    /* Advance to the next output pixel. o points just past the kc bytes
       written for this pixel. */
    input = (const uint8_t**)((uintptr_t)input + input_increment);
    output = (uint8_t*)((uintptr_t)o + output_increment);
  } while (--n != 0);
}
| 8,939
| 34.058824
| 76
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/u8maxpool/sub16-neon.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <arm_neon.h>
#include <qnnpack/u8maxpool.h>
/*
 * NEON uint8 max-pooling micro-kernel for channel counts kc < 16.
 * For each of the n output pixels, reduces ks input rows of kc bytes each to
 * their lane-wise maximum, clamps to [output_min, output_max], and stores kc
 * bytes.
 *
 * Fix: the clamp-bound loads used the mis-encoded token `¶ms->` (a mangled
 * `&params->`), which does not compile; restored the address-of expressions.
 *
 * n               - number of output pixels (non-zero).
 * ks              - pooling window size in taps (non-zero).
 * kc              - number of channels; 0 < kc < 16.
 * input           - ks pointers per output pixel; advanced by input_increment
 *                   bytes after each pixel.
 * output          - destination; advanced by output_increment bytes after the
 *                   kc bytes of each pixel.
 * params          - clamping bounds (single uint8 min/max, broadcast here).
 */
void pytorch_u8maxpool_ukernel_sub16__neon(
    size_t n,
    size_t ks,
    size_t kc,
    const uint8_t** input,
    uint8_t* output,
    size_t input_increment,
    size_t output_increment,
    const union pytorch_qnnp_u8_clamping_params params[restrict static 1]) {
  assert(n != 0);
  assert(ks != 0);
  assert(kc != 0);
  assert(kc < 16);

  /* Broadcast the scalar clamp bounds to all 16 lanes. */
  const uint8x16_t voutput_max = vld1q_dup_u8(&params->neon.output_max);
  const uint8x16_t voutput_min = vld1q_dup_u8(&params->neon.output_min);

  do {
    /* Running maximum; 0 is the identity for unsigned max. */
    uint8x16_t vmax = vmovq_n_u8(0);

    size_t m = ks;
    do {
      const uint8_t* i = *input++;
      i += kc;
      /* Assemble the kc (< 16) row bytes into the low lanes of vi by reading
         the row back-to-front in 1/2/4/8-byte chunks, shifting previously
         loaded bytes up with vextq_u8. Lanes above kc keep vmax/zero values,
         which cannot lower the maximum. */
      uint8x16_t vi = vmax;
      if (kc & 1) {
        i -= 1;
        vi = vld1q_lane_u8(i, vi, 0);
      }
      if (kc & 2) {
        vi = vextq_u8(vi, vi, 14);
        i -= 2;
        vi = vreinterpretq_u8_u16(vld1q_lane_u16(
            __builtin_assume_aligned(i, 1), vreinterpretq_u16_u8(vi), 0));
      }
      if (kc & 4) {
        vi = vextq_u8(vi, vi, 12);
        i -= 4;
        vi = vreinterpretq_u8_u32(vld1q_lane_u32(
            __builtin_assume_aligned(i, 1), vreinterpretq_u32_u8(vi), 0));
      }
      if (kc & 8) {
        i -= 8;
        vi = vcombine_u8(vld1_u8(i), vget_low_u8(vi));
      }

      vmax = vmaxq_u8(vmax, vi);
    } while (--m != 0);
    input = (const uint8_t**)((uintptr_t)input + input_increment);

    /* Clamp the reduced maximum to the quantized output range. */
    vmax = vminq_u8(vmax, voutput_max);
    vmax = vmaxq_u8(vmax, voutput_min);

    /* Store the low kc bytes in 8/4/2/1-byte chunks, rotating consumed
       bytes out with vext_u8. */
    uint8x8_t vout = vget_low_u8(vmax);
    if (kc & 8) {
      vst1_u8(output, vout);
      output += 8;
      vout = vget_high_u8(vmax);
    }
    if (kc & 4) {
      vst1_lane_u32(
          __builtin_assume_aligned(output, 1), vreinterpret_u32_u8(vout), 0);
      output += 4;
      vout = vext_u8(vout, vout, 4);
    }
    if (kc & 2) {
      vst1_lane_u16(
          __builtin_assume_aligned(output, 1), vreinterpret_u16_u8(vout), 0);
      output += 2;
      vout = vext_u8(vout, vout, 2);
    }
    if (kc & 1) {
      vst1_lane_u8(output, vout, 0);
      output += 1;
    }
    output = (uint8_t*)((uintptr_t)output + output_increment);
  } while (--n != 0);
}
| 2,404
| 25.141304
| 77
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/u8maxpool/sub16-sse2.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <emmintrin.h>
#include <qnnpack/u8maxpool.h>
/*
 * SSE2 uint8 max-pooling micro-kernel for channel counts kc < 16.
 * For each of the n output pixels, reduces ks input rows of kc bytes each to
 * their lane-wise maximum, clamps to [output_min, output_max], and stores kc
 * bytes.
 *
 * n               - number of output pixels (non-zero).
 * ks              - pooling window size in taps (non-zero).
 * kc              - number of channels; 0 < kc < 16.
 * input           - ks pointers per output pixel; advanced by input_increment
 *                   bytes after each pixel.
 * output          - destination; advanced by output_increment bytes after the
 *                   kc bytes of each pixel.
 * params          - pre-packed clamping bounds (16 copies of min/max each).
 */
void pytorch_u8maxpool_ukernel_sub16__sse2(
    size_t n,
    size_t ks,
    size_t kc,
    const uint8_t** input,
    uint8_t* output,
    size_t input_increment,
    size_t output_increment,
    const union pytorch_qnnp_u8_clamping_params params[RESTRICT_STATIC 1]) {
  assert(n != 0);
  assert(ks != 0);
  assert(kc != 0);
  assert(kc < 16);

  const __m128i voutput_max =
      _mm_load_si128((const __m128i*)params->sse2.output_max);
  const __m128i voutput_min =
      _mm_load_si128((const __m128i*)params->sse2.output_min);

  do {
    /* Running maximum; 0 is the identity for unsigned max. */
    __m128i vmax = _mm_setzero_si128();

    size_t m = ks;
    do {
      const uint8_t* i = *input++;
      i += kc;
      /* Assemble the kc (< 16) row bytes into the low lanes of vi by reading
         the row back-to-front in 1/2/4/8-byte chunks. Lanes above kc end up
         as zeros or stale vmax lanes; neither can lower the running maximum,
         and only the low kc bytes are ever stored. */
      __m128i vi = vmax;
      if (kc & 1) {
        i -= 1;
        vi = _mm_cvtsi32_si128(*i);
      }
      if (kc & 2) {
        vi = _mm_slli_epi32(vi, 16);
        i -= 2;
        vi = _mm_insert_epi16(vi, *((const uint16_t*)i), 0);
      }
      if (kc & 4) {
        i -= 4;
        vi = _mm_unpacklo_epi32(
            _mm_cvtsi32_si128((int)*((const uint32_t*)i)), vi);
      }
      if (kc & 8) {
        i -= 8;
        vi = _mm_unpacklo_epi64(_mm_loadl_epi64((const __m128i*)i), vi);
      }

      vmax = _mm_max_epu8(vmax, vi);
    } while (--m != 0);
    input = (const uint8_t**)((uintptr_t)input + input_increment);

    /* Clamp to the quantized output range, then store the low kc bytes in
       8/4/2/1-byte chunks, shifting consumed bytes out of vout. */
    __m128i vout = _mm_max_epu8(_mm_min_epu8(vmax, voutput_max), voutput_min);
    if (kc & 8) {
      _mm_storel_epi64((__m128i*)output, vout);
      output += 8;
      vout = _mm_unpackhi_epi64(vout, vout);
    }
    if (kc & 4) {
      *((uint32_t*)output) = (uint32_t)_mm_cvtsi128_si32(vout);
      output += 4;
      vout = _mm_srli_epi64(vout, 32);
    }
    if (kc & 2) {
      *((uint16_t*)output) = (uint16_t)_mm_extract_epi16(vout, 0);
      output += 2;
      vout = _mm_srli_epi32(vout, 16);
    }
    if (kc & 1) {
      *((uint8_t*)output) = (uint8_t)_mm_cvtsi128_si32(vout);
      output += 1;
    }
    output = (uint8_t*)((uintptr_t)output + output_increment);
  } while (--n != 0);
}
| 2,291
| 25.344828
| 78
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/u8rmax/neon.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <arm_neon.h>
#include <qnnpack/u8rmax.h>
/*
 * NEON reduction kernel: returns the maximum of n uint8 values (n != 0).
 * Vector path accumulates 16-lane maxima, handles the ragged tail with an
 * overlapping load of the final 16 bytes (safe: re-maxing bytes already seen
 * is a no-op), then folds 16 lanes to 1 with pairwise maxima.
 */
uint8_t pytorch_u8rmax_ukernel__neon(size_t n, const uint8_t* x) {
  assert(n != 0);

  if
    PYTORCH_QNNP_LIKELY(n >= 16) {
      /* 0 is the identity element for unsigned max. */
      uint8x16_t vmax = vmovq_n_u8(0);
      do {
        const uint8x16_t vx = vld1q_u8(x);
        x += 16;
        vmax = vmaxq_u8(vmax, vx);
        n -= 16;
      } while (n >= 16);
      if (n != 0) {
        /* Step back so the last load covers exactly the final 16 bytes. */
        const size_t x_increment = n - 16;
        x = (const uint8_t*)((uintptr_t)x + x_increment);
        const uint8x16_t vx = vld1q_u8(x);
        vmax = vmaxq_u8(vmax, vx);
      }
      /* Horizontal reduction: 16 -> 8 -> 4 -> 2 -> 1 lanes. */
      uint8x8_t vmax8 = vmax_u8(vget_low_u8(vmax), vget_high_u8(vmax));
      const uint8x8_t vmax4 = vpmax_u8(vmax8, vmax8);
      const uint8x8_t vmax2 = vpmax_u8(vmax4, vmax4);
      const uint8x8_t vmax1 = vpmax_u8(vmax2, vmax2);
      return vget_lane_u8(vmax1, 0);
    }
  else {
    /* Scalar path for fewer than 16 elements. */
    uint8x8_t vmax = vmov_n_u8(0);
    do {
      const uint8x8_t vx = vld1_dup_u8(x);
      x += 1;
      vmax = vmax_u8(vmax, vx);
    } while (--n != 0);
    return vget_lane_u8(vmax, 0);
  }
}
| 1,295
| 25.44898
| 72
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/u8rmax/sse2.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <emmintrin.h>
#include <qnnpack/u8rmax.h>
/*
 * SSE2 reduction kernel: returns the maximum of n uint8 values (n != 0).
 * Vector path accumulates 16-lane maxima, covers the ragged tail with an
 * overlapping load of the final 16 bytes (re-maxing bytes already seen is a
 * no-op), then folds the 16 lanes down with successively narrower shifts.
 */
uint8_t pytorch_u8rmax_ukernel__sse2(size_t n, const uint8_t* x) {
  assert(n != 0);

  if
    PYTORCH_QNNP_LIKELY(n >= 16) {
      /* 0 is the identity element for unsigned max. */
      __m128i vacc = _mm_setzero_si128();
      do {
        const __m128i vchunk = _mm_loadu_si128((const __m128i*)x);
        x += 16;
        n -= 16;
        vacc = _mm_max_epu8(vacc, vchunk);
      } while (n >= 16);
      if (n != 0) {
        /* Step back so the final load ends exactly at the last byte. */
        x = (const uint8_t*)((uintptr_t)x + (n - 16));
        vacc = _mm_max_epu8(vacc, _mm_loadu_si128((const __m128i*)x));
      }
      /* Horizontal reduction: 16 -> 8 -> 4 -> 2 -> 1 lanes. */
      vacc = _mm_max_epu8(vacc, _mm_unpackhi_epi64(vacc, vacc));
      vacc = _mm_max_epu8(vacc, _mm_srli_epi64(vacc, 32));
      vacc = _mm_max_epu8(vacc, _mm_srli_epi32(vacc, 16));
      vacc = _mm_max_epu8(vacc, _mm_srli_epi16(vacc, 8));
      return (uint8_t)_mm_cvtsi128_si32(vacc);
    }
  else {
    /* Scalar path for fewer than 16 elements. */
    uint8_t best = 0;
    do {
      const uint8_t value = *x++;
      if (value > best) {
        best = value;
      }
    } while (--n != 0);
    return best;
  }
}
| 1,312
| 26.354167
| 72
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/x8zip/x2-neon.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <arm_neon.h>
#include <qnnpack/x8zip.h>
/*
 * NEON 2-way byte interleave: given two consecutive length-n arrays X and Y
 * starting at `input`, writes x0 y0 x1 y1 ... (2*n bytes) to `output`.
 * n must be non-zero.
 *
 * Fix: removed a stray empty statement (`;`) left in the main loop body.
 */
void pytorch_qnnp_x8zip_x2__neon(size_t n, const void* input, void* output) {
  const uint8_t* x = input;
  const uint8_t* y = x + n;
  uint8_t* o = output;

  if (n >= 8) {
    do {
      /* vst2_u8 interleaves the two 8-byte registers on store. */
      uint8x8x2_t vxy;
      vxy.val[0] = vld1_u8(x);
      x += 8;
      vxy.val[1] = vld1_u8(y);
      y += 8;
      vst2_u8(o, vxy);
      o += 16;
      n -= 8;
    } while (n >= 8);
    if (n != 0) {
      /* 1-7 leftover bytes: redo an overlapping 8-byte group that ends at
         the last valid byte; overlapped outputs are rewritten identically. */
      const size_t address_increment = n - 8;
      uint8x8x2_t vxy;
      vxy.val[0] = vld1_u8((const uint8_t*)((uintptr_t)x + address_increment));
      vxy.val[1] = vld1_u8((const uint8_t*)((uintptr_t)y + address_increment));
      vst2_u8((uint8_t*)((uintptr_t)o + address_increment * 2), vxy);
    }
  } else {
    /* Scalar path for n < 8. */
    do {
      const uint8_t vx = *x++;
      const uint8_t vy = *y++;
      o[0] = vx;
      o[1] = vy;
      o += 2;
    } while (--n != 0);
  }
}
| 1,131
| 23.085106
| 79
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/x8zip/x2-sse2.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <emmintrin.h>
#include <qnnpack/x8zip.h>
/*
 * SSE2 2-way byte interleave: given two consecutive length-n arrays X and Y
 * starting at `input`, writes x0 y0 x1 y1 ... (2*n bytes) to `output`.
 * n must be non-zero.
 *
 * Cleanup: pointer bookkeeping now uses explicit uint8_t* casts instead of
 * round-tripping through void* assignments, matching the NEON sibling kernel
 * (and remaining valid under stricter compilers).
 */
void pytorch_qnnp_x8zip_x2__sse2(size_t n, const void* input, void* output) {
  const uint8_t* x = (const uint8_t*)input;
  const uint8_t* y = x + n;
  uint8_t* o = (uint8_t*)output;

  if (n >= 16) {
    do {
      const __m128i vx = _mm_loadu_si128((const __m128i*)x);
      x += 16;
      const __m128i vy = _mm_loadu_si128((const __m128i*)y);
      y += 16;
      /* Byte-interleave the low and high halves of vx/vy. */
      const __m128i vxy_lo = _mm_unpacklo_epi8(vx, vy);
      const __m128i vxy_hi = _mm_unpackhi_epi8(vx, vy);
      _mm_storeu_si128((__m128i*)o, vxy_lo);
      _mm_storeu_si128((__m128i*)(o + 16), vxy_hi);
      o += 32;
      n -= 16;
    } while (n >= 16);
    if (n != 0) {
      /* 1-15 leftover bytes: redo an overlapping 16-byte group that ends at
         the last valid byte; overlapped outputs are rewritten identically. */
      const size_t address_increment = n - 16;
      const __m128i vx =
          _mm_loadu_si128((const __m128i*)((uintptr_t)x + address_increment));
      const __m128i vy =
          _mm_loadu_si128((const __m128i*)((uintptr_t)y + address_increment));
      const __m128i vxy_lo = _mm_unpacklo_epi8(vx, vy);
      const __m128i vxy_hi = _mm_unpackhi_epi8(vx, vy);
      o = (uint8_t*)((uintptr_t)o + address_increment * 2);
      _mm_storeu_si128((__m128i*)o, vxy_lo);
      _mm_storeu_si128((__m128i*)o + 1, vxy_hi);
    }
  } else {
    /* Scalar path for n < 16. */
    do {
      const uint8_t vx = *x++;
      const uint8_t vy = *y++;
      o[0] = vx;
      o[1] = vy;
      o += 2;
    } while (--n != 0);
  }
}
| 1,594
| 29.09434
| 78
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/x8zip/x3-neon.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <arm_neon.h>
#include <qnnpack/x8zip.h>
/*
 * NEON 3-way byte interleave: given three consecutive length-n arrays X, Y, Z
 * starting at `input`, writes x0 y0 z0 x1 y1 z1 ... (3*n bytes) to `output`.
 * n must be non-zero.
 *
 * Fix: the tail formed pointers with `x + address_increment`, where
 * address_increment is a wrapped size_t (n - 8 with n < 8) — out-of-range
 * pointer arithmetic. Rewritten through uintptr_t, matching the x2 NEON
 * kernel and the SSE2 variants.
 */
void pytorch_qnnp_x8zip_x3__neon(size_t n, const void* input, void* output) {
  const uint8_t* x = input;
  const uint8_t* y = x + n;
  const uint8_t* z = y + n;
  uint8_t* o = output;

  if (n >= 8) {
    do {
      /* vst3_u8 interleaves the three 8-byte registers on store. */
      uint8x8x3_t vxyz;
      vxyz.val[0] = vld1_u8(x);
      x += 8;
      vxyz.val[1] = vld1_u8(y);
      y += 8;
      vxyz.val[2] = vld1_u8(z);
      z += 8;
      vst3_u8(o, vxyz);
      o += 24;
      n -= 8;
    } while (n >= 8);
    if (n != 0) {
      /* 1-7 leftover bytes: redo an overlapping 8-byte group that ends at
         the last valid byte; overlapped outputs are rewritten identically. */
      const size_t address_increment = n - 8;
      uint8x8x3_t vxyz;
      vxyz.val[0] = vld1_u8((const uint8_t*)((uintptr_t)x + address_increment));
      vxyz.val[1] = vld1_u8((const uint8_t*)((uintptr_t)y + address_increment));
      vxyz.val[2] = vld1_u8((const uint8_t*)((uintptr_t)z + address_increment));
      vst3_u8((uint8_t*)((uintptr_t)o + address_increment * 3), vxyz);
    }
  } else {
    /* Scalar path for n < 8. */
    do {
      const uint8_t vx = *x++;
      const uint8_t vy = *y++;
      const uint8_t vz = *z++;
      o[0] = vx;
      o[1] = vy;
      o[2] = vz;
      o += 3;
    } while (--n != 0);
  }
}
| 1,247
| 23
| 77
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/x8zip/x3-sse2.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <emmintrin.h>
#include <qnnpack/x8zip.h>
/*
 * Interleaves one 16-byte group from each of three source vectors and stores
 * the 48 interleaved bytes (x0 y0 z0 x1 y1 z1 ... x15 y15 z15) at `o`.
 * SSE2 has no byte shuffle, so the permutation is built from mask/shift
 * byte-lane swizzles followed by 32-bit _mm_shuffle_ps shuffles.
 * Extracted as a helper because the identical sequence was duplicated in the
 * main loop and the tail of pytorch_qnnp_x8zip_x3__sse2.
 */
static void pytorch_qnnp_x8zip_x3__sse2_interleave16(
    const __m128i vx,
    const __m128i vy,
    const __m128i vz,
    uint8_t* o) {
  const __m128i vmask0x00FF00FF = _mm_set1_epi16(0x00FF);
  const __m128i vmask0x0000FFFF = _mm_set1_epi32(0x0000FFFF);
  /* Pair bytes of two sources inside each 16-bit lane. */
  const __m128i vxeye = _mm_or_si128(
      _mm_and_si128(vx, vmask0x00FF00FF), _mm_slli_epi16(vy, 8));
  const __m128i vyozo = _mm_or_si128(
      _mm_andnot_si128(vmask0x00FF00FF, vz), _mm_srli_epi16(vy, 8));
  const __m128i vzexo = _mm_or_si128(
      _mm_and_si128(vz, vmask0x00FF00FF),
      _mm_andnot_si128(vmask0x00FF00FF, vx));
  /* Combine pairs into 4-byte groups inside each 32-bit lane. */
  const __m128i vxeyezexo = _mm_or_si128(
      _mm_and_si128(vxeye, vmask0x0000FFFF), _mm_slli_epi32(vzexo, 16));
  const __m128i vyozoxeye = _mm_or_si128(
      _mm_and_si128(vyozo, vmask0x0000FFFF),
      _mm_andnot_si128(vmask0x0000FFFF, vxeye));
  const __m128i vzexoyozo = _mm_or_si128(
      _mm_andnot_si128(vmask0x0000FFFF, vyozo), _mm_srli_epi32(vzexo, 16));
  /* 32-bit shuffles place the 4-byte groups into final output order. */
  const __m128i vtemp0 = _mm_castps_si128(_mm_shuffle_ps(
      _mm_castsi128_ps(vzexoyozo),
      _mm_castsi128_ps(vxeyezexo),
      _MM_SHUFFLE(3, 1, 2, 0)));
  const __m128i vtemp1 = _mm_castps_si128(_mm_shuffle_ps(
      _mm_castsi128_ps(vxeyezexo),
      _mm_castsi128_ps(vyozoxeye),
      _MM_SHUFFLE(2, 0, 2, 0)));
  const __m128i vtemp2 = _mm_castps_si128(_mm_shuffle_ps(
      _mm_castsi128_ps(vyozoxeye),
      _mm_castsi128_ps(vzexoyozo),
      _MM_SHUFFLE(3, 1, 3, 1)));
  const __m128i vxyz0 = _mm_castps_si128(_mm_shuffle_ps(
      _mm_castsi128_ps(vtemp1),
      _mm_castsi128_ps(vtemp0),
      _MM_SHUFFLE(2, 0, 2, 0)));
  const __m128i vxyz1 = _mm_castps_si128(_mm_shuffle_ps(
      _mm_castsi128_ps(vtemp2),
      _mm_castsi128_ps(vtemp1),
      _MM_SHUFFLE(3, 1, 2, 0)));
  const __m128i vxyz2 = _mm_castps_si128(_mm_shuffle_ps(
      _mm_castsi128_ps(vtemp0),
      _mm_castsi128_ps(vtemp2),
      _MM_SHUFFLE(3, 1, 3, 1)));
  _mm_storeu_si128((__m128i*)o, vxyz0);
  _mm_storeu_si128((__m128i*)o + 1, vxyz1);
  _mm_storeu_si128((__m128i*)o + 2, vxyz2);
}

/*
 * SSE2 3-way byte interleave: given three consecutive length-n arrays X, Y, Z
 * starting at `input`, writes x0 y0 z0 x1 y1 z1 ... (3*n bytes) to `output`.
 * n must be non-zero.
 *
 * Cleanup: the 16-byte interleave sequence, previously duplicated verbatim in
 * the main loop and the tail, now lives in a single static helper; pointer
 * initialization uses explicit casts.
 */
void pytorch_qnnp_x8zip_x3__sse2(size_t n, const void* input, void* output) {
  const uint8_t* x = (const uint8_t*)input;
  const uint8_t* y = x + n;
  const uint8_t* z = y + n;
  uint8_t* o = (uint8_t*)output;

  if (n >= 16) {
    do {
      const __m128i vx = _mm_loadu_si128((const __m128i*)x);
      x += 16;
      const __m128i vy = _mm_loadu_si128((const __m128i*)y);
      y += 16;
      const __m128i vz = _mm_loadu_si128((const __m128i*)z);
      z += 16;
      pytorch_qnnp_x8zip_x3__sse2_interleave16(vx, vy, vz, o);
      o += 48;
      n -= 16;
    } while (n >= 16);
    if (n != 0) {
      /* 1-15 leftover bytes: redo an overlapping 16-byte group that ends at
         the last valid byte; overlapped outputs are rewritten identically. */
      const size_t address_increment = n - 16;
      const __m128i vx =
          _mm_loadu_si128((const __m128i*)((uintptr_t)x + address_increment));
      const __m128i vy =
          _mm_loadu_si128((const __m128i*)((uintptr_t)y + address_increment));
      const __m128i vz =
          _mm_loadu_si128((const __m128i*)((uintptr_t)z + address_increment));
      pytorch_qnnp_x8zip_x3__sse2_interleave16(
          vx, vy, vz, (uint8_t*)((uintptr_t)o + address_increment * 3));
    }
  } else {
    /* Scalar path for n < 16. */
    do {
      const uint8_t vx = *x++;
      const uint8_t vy = *y++;
      const uint8_t vz = *z++;
      o[0] = vx;
      o[1] = vy;
      o[2] = vz;
      o += 3;
    } while (--n != 0);
  }
}
| 8,837
| 41.902913
| 80
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/x8zip/x4-neon.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <arm_neon.h>
#include <qnnpack/x8zip.h>
/*
 * NEON 4-way byte interleave: given four consecutive length-n arrays X, Y, Z,
 * W starting at `input`, writes x0 y0 z0 w0 x1 ... (4*n bytes) to `output`.
 * n must be non-zero.
 *
 * Fix: the tail formed pointers with `x + address_increment`, where
 * address_increment is a wrapped size_t (n - 8 with n < 8) — out-of-range
 * pointer arithmetic. Rewritten through uintptr_t, matching the x2 NEON
 * kernel and the SSE2 variants.
 */
void pytorch_qnnp_x8zip_x4__neon(size_t n, const void* input, void* output) {
  const uint8_t* x = input;
  const uint8_t* y = x + n;
  const uint8_t* z = y + n;
  const uint8_t* w = z + n;
  uint8_t* o = output;

  if (n >= 8) {
    do {
      /* vst4_u8 interleaves the four 8-byte registers on store. */
      uint8x8x4_t vxyzw;
      vxyzw.val[0] = vld1_u8(x);
      x += 8;
      vxyzw.val[1] = vld1_u8(y);
      y += 8;
      vxyzw.val[2] = vld1_u8(z);
      z += 8;
      vxyzw.val[3] = vld1_u8(w);
      w += 8;
      vst4_u8(o, vxyzw);
      o += 32;
      n -= 8;
    } while (n >= 8);
    if (n != 0) {
      /* 1-7 leftover bytes: redo an overlapping 8-byte group that ends at
         the last valid byte; overlapped outputs are rewritten identically. */
      const size_t address_increment = n - 8;
      uint8x8x4_t vxyzw;
      vxyzw.val[0] =
          vld1_u8((const uint8_t*)((uintptr_t)x + address_increment));
      vxyzw.val[1] =
          vld1_u8((const uint8_t*)((uintptr_t)y + address_increment));
      vxyzw.val[2] =
          vld1_u8((const uint8_t*)((uintptr_t)z + address_increment));
      vxyzw.val[3] =
          vld1_u8((const uint8_t*)((uintptr_t)w + address_increment));
      vst4_u8((uint8_t*)((uintptr_t)o + address_increment * 4), vxyzw);
    }
  } else {
    /* Scalar path for n < 8. */
    do {
      const uint8_t vx = *x++;
      const uint8_t vy = *y++;
      const uint8_t vz = *z++;
      const uint8_t vw = *w++;
      o[0] = vx;
      o[1] = vy;
      o[2] = vz;
      o[3] = vw;
      o += 4;
    } while (--n != 0);
  }
}
| 1,433
| 23.724138
| 77
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/x8zip/x4-sse2.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <emmintrin.h>
#include <qnnpack/x8zip.h>
/*
 * SSE2 4-way byte interleave: given four consecutive length-n arrays X, Y, Z,
 * W starting at `input`, writes x0 y0 z0 w0 x1 ... (4*n bytes) to `output`.
 * n must be non-zero.
 *
 * Cleanup: pointer bookkeeping now uses explicit uint8_t* casts instead of
 * round-tripping through void* assignments, matching the NEON sibling kernel
 * (and remaining valid under stricter compilers).
 */
void pytorch_qnnp_x8zip_x4__sse2(size_t n, const void* input, void* output) {
  const uint8_t* x = (const uint8_t*)input;
  const uint8_t* y = x + n;
  const uint8_t* z = y + n;
  const uint8_t* w = z + n;
  uint8_t* o = (uint8_t*)output;

  if (n >= 16) {
    do {
      const __m128i vx = _mm_loadu_si128((const __m128i*)x);
      x += 16;
      const __m128i vy = _mm_loadu_si128((const __m128i*)y);
      y += 16;
      const __m128i vz = _mm_loadu_si128((const __m128i*)z);
      z += 16;
      const __m128i vw = _mm_loadu_si128((const __m128i*)w);
      w += 16;
      /* Interleave bytes pairwise, then interleave the pairs 16 bits at a
         time to get x y z w quadruples. */
      const __m128i vxy_lo = _mm_unpacklo_epi8(vx, vy);
      const __m128i vxy_hi = _mm_unpackhi_epi8(vx, vy);
      const __m128i vzw_lo = _mm_unpacklo_epi8(vz, vw);
      const __m128i vzw_hi = _mm_unpackhi_epi8(vz, vw);
      const __m128i vxyzw0 = _mm_unpacklo_epi16(vxy_lo, vzw_lo);
      const __m128i vxyzw1 = _mm_unpackhi_epi16(vxy_lo, vzw_lo);
      const __m128i vxyzw2 = _mm_unpacklo_epi16(vxy_hi, vzw_hi);
      const __m128i vxyzw3 = _mm_unpackhi_epi16(vxy_hi, vzw_hi);
      _mm_storeu_si128((__m128i*)o, vxyzw0);
      _mm_storeu_si128((__m128i*)o + 1, vxyzw1);
      _mm_storeu_si128((__m128i*)o + 2, vxyzw2);
      _mm_storeu_si128((__m128i*)o + 3, vxyzw3);
      o += 64;
      n -= 16;
    } while (n >= 16);
    if (n != 0) {
      /* 1-15 leftover bytes: redo an overlapping 16-byte group that ends at
         the last valid byte; overlapped outputs are rewritten identically. */
      const size_t address_increment = n - 16;
      const __m128i vx =
          _mm_loadu_si128((const __m128i*)((uintptr_t)x + address_increment));
      const __m128i vy =
          _mm_loadu_si128((const __m128i*)((uintptr_t)y + address_increment));
      const __m128i vz =
          _mm_loadu_si128((const __m128i*)((uintptr_t)z + address_increment));
      const __m128i vw =
          _mm_loadu_si128((const __m128i*)((uintptr_t)w + address_increment));
      const __m128i vxy_lo = _mm_unpacklo_epi8(vx, vy);
      const __m128i vxy_hi = _mm_unpackhi_epi8(vx, vy);
      const __m128i vzw_lo = _mm_unpacklo_epi8(vz, vw);
      const __m128i vzw_hi = _mm_unpackhi_epi8(vz, vw);
      const __m128i vxyzw0 = _mm_unpacklo_epi16(vxy_lo, vzw_lo);
      const __m128i vxyzw1 = _mm_unpackhi_epi16(vxy_lo, vzw_lo);
      const __m128i vxyzw2 = _mm_unpacklo_epi16(vxy_hi, vzw_hi);
      const __m128i vxyzw3 = _mm_unpackhi_epi16(vxy_hi, vzw_hi);
      o = (uint8_t*)((uintptr_t)o + address_increment * 4);
      _mm_storeu_si128((__m128i*)o, vxyzw0);
      _mm_storeu_si128((__m128i*)o + 1, vxyzw1);
      _mm_storeu_si128((__m128i*)o + 2, vxyzw2);
      _mm_storeu_si128((__m128i*)o + 3, vxyzw3);
    }
  } else {
    /* Scalar path for n < 16. */
    do {
      const uint8_t vx = *x++;
      const uint8_t vy = *y++;
      const uint8_t vz = *z++;
      const uint8_t vw = *w++;
      o[0] = vx;
      o[1] = vy;
      o[2] = vz;
      o[3] = vw;
      o += 4;
    } while (--n != 0);
  }
}
| 3,043
| 35.674699
| 78
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/x8zip/xm-neon.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <arm_neon.h>
#include <qnnpack/x8zip.h>
/*
 * Interleaves (zips) m planar uint8 channels of n bytes each into one
 * interleaved stream, processing 4 channels per outer pass with NEON.
 * Input: m consecutive planes of n bytes; output: n groups of m bytes.
 * For the final pass when m is not a multiple of 4, the channel pointers are
 * clamped to the last 4 planes and the overlapping output is rewritten with
 * identical values. Requires n >= 1 and m >= 4 for the vector path
 * (n < 8 falls back to a scalar transpose).
 */
void pytorch_qnnp_x8zip_xm__neon(
    size_t n,
    size_t m,
    const void* input,
    void* output) {
  const uint8_t* w = input;
  const size_t input_increment = n * 3;
  /* Computed modulo SIZE_MAX: applied via uintptr_t arithmetic below, this
   * steps the output from the end of one 4-channel column group to the start
   * of the next group of columns. */
  const size_t output_increment = 4 - m * n;
  const uint8_t* last_input = w + n * (m - 1);
  void* last_output = (void*)((uintptr_t)output + (m - 4));
  if (n >= 8) {
    /* Process channels in groups of 4: x, y, z, w point at 4 adjacent planes. */
    for (size_t i = 0; i < m; i += 4) {
      size_t k = n;
      w = (const uint8_t*)((uintptr_t)w + input_increment);
      if (w >= last_input) {
        /* Clamp so the last (possibly partial) group re-reads the final 4 planes. */
        w = last_input;
      }
      const uint8_t* z = (const uint8_t*)((uintptr_t)w - n);
      const uint8_t* y = (const uint8_t*)((uintptr_t)z - n);
      const uint8_t* x = (const uint8_t*)((uintptr_t)y - n);
      /* Main loop: zip 8 bytes from each of the 4 planes, then scatter the
       * eight resulting 4-byte x/y/z/w groups into rows m bytes apart. */
      while (k >= 8) {
        const uint8x8_t vx = vld1_u8(x);
        x += 8;
        const uint8x8_t vy = vld1_u8(y);
        y += 8;
        const uint8x8_t vz = vld1_u8(z);
        z += 8;
        const uint8x8_t vw = vld1_u8(w);
        w += 8;
        const uint8x8x2_t vxy = vzip_u8(vx, vy);
        const uint8x8x2_t vzw = vzip_u8(vz, vw);
        const uint16x4x2_t vxyzw_lo = vzip_u16(
            vreinterpret_u16_u8(vxy.val[0]), vreinterpret_u16_u8(vzw.val[0]));
        const uint16x4x2_t vxyzw_hi = vzip_u16(
            vreinterpret_u16_u8(vxy.val[1]), vreinterpret_u16_u8(vzw.val[1]));
        /* Unaligned 32-bit lane stores; __builtin_assume_aligned(.., 1)
         * tells the compiler the destination has byte alignment only. */
        vst1_lane_u32(
            __builtin_assume_aligned(output, 1),
            vreinterpret_u32_u16(vxyzw_lo.val[0]),
            0);
        output = (void*)((uintptr_t)output + m);
        vst1_lane_u32(
            __builtin_assume_aligned(output, 1),
            vreinterpret_u32_u16(vxyzw_lo.val[0]),
            1);
        output = (void*)((uintptr_t)output + m);
        vst1_lane_u32(
            __builtin_assume_aligned(output, 1),
            vreinterpret_u32_u16(vxyzw_lo.val[1]),
            0);
        output = (void*)((uintptr_t)output + m);
        vst1_lane_u32(
            __builtin_assume_aligned(output, 1),
            vreinterpret_u32_u16(vxyzw_lo.val[1]),
            1);
        output = (void*)((uintptr_t)output + m);
        vst1_lane_u32(
            __builtin_assume_aligned(output, 1),
            vreinterpret_u32_u16(vxyzw_hi.val[0]),
            0);
        output = (void*)((uintptr_t)output + m);
        vst1_lane_u32(
            __builtin_assume_aligned(output, 1),
            vreinterpret_u32_u16(vxyzw_hi.val[0]),
            1);
        output = (void*)((uintptr_t)output + m);
        vst1_lane_u32(
            __builtin_assume_aligned(output, 1),
            vreinterpret_u32_u16(vxyzw_hi.val[1]),
            0);
        output = (void*)((uintptr_t)output + m);
        vst1_lane_u32(
            __builtin_assume_aligned(output, 1),
            vreinterpret_u32_u16(vxyzw_hi.val[1]),
            1);
        output = (void*)((uintptr_t)output + m);
        k -= 8;
      }
      if (k != 0) {
        /*
         * Tail (1..7 leftover bytes per plane): step pointers back so a full
         * 8-byte load ends exactly at the plane end, then shift the already
         * processed leading bytes out. address_increment = k - 8 is negative
         * as a signed offset, so vshift is negative and vshl_u64 performs a
         * right shift by 8*(8-k) bits.
         */
        const size_t address_increment = k - 8;
        x = (const uint8_t*)((uintptr_t)x + address_increment);
        y = (const uint8_t*)((uintptr_t)y + address_increment);
        z = (const uint8_t*)((uintptr_t)z + address_increment);
        w = (const uint8_t*)((uintptr_t)w + address_increment);
        const int64x1_t vshift = vmov_n_s64(8 * address_increment);
        const uint64x1_t vx = vshl_u64(vreinterpret_u64_u8(vld1_u8(x)), vshift);
        const uint64x1_t vy = vshl_u64(vreinterpret_u64_u8(vld1_u8(y)), vshift);
        const uint64x1_t vz = vshl_u64(vreinterpret_u64_u8(vld1_u8(z)), vshift);
        const uint64x1_t vw = vshl_u64(vreinterpret_u64_u8(vld1_u8(w)), vshift);
        /* Net advance of w over this pass is exactly k bytes, which the next
         * outer iteration relies on when recomputing the plane pointers. */
        w += 8;
        const uint8x8x2_t vxy =
            vzip_u8(vreinterpret_u8_u64(vx), vreinterpret_u8_u64(vy));
        const uint8x8x2_t vzw =
            vzip_u8(vreinterpret_u8_u64(vz), vreinterpret_u8_u64(vw));
        const uint16x4x2_t vxyzw_lo = vzip_u16(
            vreinterpret_u16_u8(vxy.val[0]), vreinterpret_u16_u8(vzw.val[0]));
        const uint16x4x2_t vxyzw_hi = vzip_u16(
            vreinterpret_u16_u8(vxy.val[1]), vreinterpret_u16_u8(vzw.val[1]));
        uint32x2_t vxyzw0 = vreinterpret_u32_u16(vxyzw_lo.val[0]);
        uint32x2_t vxyzw1 = vreinterpret_u32_u16(vxyzw_lo.val[1]);
        uint32x2_t vxyzw2 = vreinterpret_u32_u16(vxyzw_hi.val[0]);
        uint32x2_t vxyzw3 = vreinterpret_u32_u16(vxyzw_hi.val[1]);
        /* Store the remaining k groups, binary-decomposed over k's bits. */
        if (k & 4) {
          vst1_lane_u32(__builtin_assume_aligned(output, 1), vxyzw0, 0);
          output = (void*)((uintptr_t)output + m);
          vst1_lane_u32(__builtin_assume_aligned(output, 1), vxyzw0, 1);
          output = (void*)((uintptr_t)output + m);
          vst1_lane_u32(__builtin_assume_aligned(output, 1), vxyzw1, 0);
          output = (void*)((uintptr_t)output + m);
          vst1_lane_u32(__builtin_assume_aligned(output, 1), vxyzw1, 1);
          output = (void*)((uintptr_t)output + m);
          vxyzw0 = vxyzw2;
          vxyzw1 = vxyzw3;
        }
        if (k & 2) {
          vst1_lane_u32(__builtin_assume_aligned(output, 1), vxyzw0, 0);
          output = (void*)((uintptr_t)output + m);
          vst1_lane_u32(__builtin_assume_aligned(output, 1), vxyzw0, 1);
          output = (void*)((uintptr_t)output + m);
          vxyzw0 = vxyzw1;
        }
        if (k & 1) {
          vst1_lane_u32(__builtin_assume_aligned(output, 1), vxyzw0, 0);
          output = (void*)((uintptr_t)output + m);
        }
      }
      /* Move to the next 4 output columns; clamp so a partial final group
       * rewrites the last 4 columns instead of running past the buffer. */
      output = (void*)((uintptr_t)output + output_increment);
      if (output > last_output) {
        output = last_output;
      }
    }
  } else {
    /* Scalar fallback for n < 8: straightforward plane-to-row transpose. */
    const uint8_t* i = input;
    uint8_t* o = output;
    size_t k = n;
    do {
      size_t l = m;
      const uint8_t* ii = i++;
      do {
        *o++ = *ii;
        ii += n;
      } while (--l != 0);
    } while (--k != 0);
  }
}
| 6,025
| 32.853933
| 80
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/src/x8zip/xm-sse2.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <emmintrin.h>
#include <qnnpack/x8zip.h>
/*
 * Interleaves (zips) m planar uint8 channels of n bytes each into one
 * interleaved stream, processing 4 channels per outer pass with SSE2.
 * Input: m consecutive planes of n bytes; output: n groups of m bytes.
 * For the final pass when m is not a multiple of 4, the channel pointers are
 * clamped to the last 4 planes and the overlapping output is rewritten with
 * identical values. n < 8 falls back to a scalar transpose.
 */
void pytorch_qnnp_x8zip_xm__sse2(
    size_t n,
    size_t m,
    const void* input,
    void* output) {
  const uint8_t* w = input;
  const size_t input_increment = n * 3;
  /* Computed modulo SIZE_MAX: applied via uintptr_t arithmetic below, this
   * steps the output from the end of one 4-channel column group to the start
   * of the next group of columns. */
  const size_t output_increment = 4 - m * n;
  const uint8_t* last_input = w + n * (m - 1);
  void* last_output = (void*)((uintptr_t)output + (m - 4));
  if (n >= 8) {
    /* Process channels in groups of 4: x, y, z, w point at 4 adjacent planes. */
    for (size_t i = 0; i < m; i += 4) {
      size_t k = n;
      w = (const uint8_t*)((uintptr_t)w + input_increment);
      if (w >= last_input) {
        /* Clamp so the last (possibly partial) group re-reads the final 4 planes. */
        w = last_input;
      }
      const uint8_t* z = (const uint8_t*)((uintptr_t)w - n);
      const uint8_t* y = (const uint8_t*)((uintptr_t)z - n);
      const uint8_t* x = (const uint8_t*)((uintptr_t)y - n);
      /* Main loop: zip 16 bytes per plane; each of the 16 resulting 4-byte
       * x/y/z/w groups is stored to a row m bytes apart, extracting successive
       * 32-bit lanes via shufflelo / unpackhi + cvtsi128_si32. */
      while (k >= 16) {
        const __m128i vx = _mm_loadu_si128((const __m128i*)x);
        x += 16;
        const __m128i vy = _mm_loadu_si128((const __m128i*)y);
        y += 16;
        const __m128i vz = _mm_loadu_si128((const __m128i*)z);
        z += 16;
        const __m128i vw = _mm_loadu_si128((const __m128i*)w);
        w += 16;
        const __m128i vxy_lo = _mm_unpacklo_epi8(vx, vy);
        const __m128i vxy_hi = _mm_unpackhi_epi8(vx, vy);
        const __m128i vzw_lo = _mm_unpacklo_epi8(vz, vw);
        const __m128i vzw_hi = _mm_unpackhi_epi8(vz, vw);
        __m128i vxyzw0 = _mm_unpacklo_epi16(vxy_lo, vzw_lo);
        __m128i vxyzw1 = _mm_unpackhi_epi16(vxy_lo, vzw_lo);
        __m128i vxyzw2 = _mm_unpacklo_epi16(vxy_hi, vzw_hi);
        __m128i vxyzw3 = _mm_unpackhi_epi16(vxy_hi, vzw_hi);
        *((uint32_t*)output) = _mm_cvtsi128_si32(vxyzw0);
        output = (void*)((uintptr_t)output + m);
        vxyzw0 = _mm_shufflelo_epi16(vxyzw0, _MM_SHUFFLE(3, 2, 3, 2));
        *((uint32_t*)output) = _mm_cvtsi128_si32(vxyzw0);
        output = (void*)((uintptr_t)output + m);
        vxyzw0 = _mm_unpackhi_epi64(vxyzw0, vxyzw0);
        *((uint32_t*)output) = _mm_cvtsi128_si32(vxyzw0);
        output = (void*)((uintptr_t)output + m);
        vxyzw0 = _mm_shufflelo_epi16(vxyzw0, _MM_SHUFFLE(3, 2, 3, 2));
        *((uint32_t*)output) = _mm_cvtsi128_si32(vxyzw0);
        output = (void*)((uintptr_t)output + m);
        *((uint32_t*)output) = _mm_cvtsi128_si32(vxyzw1);
        output = (void*)((uintptr_t)output + m);
        vxyzw1 = _mm_shufflelo_epi16(vxyzw1, _MM_SHUFFLE(3, 2, 3, 2));
        *((uint32_t*)output) = _mm_cvtsi128_si32(vxyzw1);
        output = (void*)((uintptr_t)output + m);
        vxyzw1 = _mm_unpackhi_epi64(vxyzw1, vxyzw1);
        *((uint32_t*)output) = _mm_cvtsi128_si32(vxyzw1);
        output = (void*)((uintptr_t)output + m);
        vxyzw1 = _mm_shufflelo_epi16(vxyzw1, _MM_SHUFFLE(3, 2, 3, 2));
        *((uint32_t*)output) = _mm_cvtsi128_si32(vxyzw1);
        output = (void*)((uintptr_t)output + m);
        *((uint32_t*)output) = _mm_cvtsi128_si32(vxyzw2);
        output = (void*)((uintptr_t)output + m);
        vxyzw2 = _mm_shufflelo_epi16(vxyzw2, _MM_SHUFFLE(3, 2, 3, 2));
        *((uint32_t*)output) = _mm_cvtsi128_si32(vxyzw2);
        output = (void*)((uintptr_t)output + m);
        vxyzw2 = _mm_unpackhi_epi64(vxyzw2, vxyzw2);
        *((uint32_t*)output) = _mm_cvtsi128_si32(vxyzw2);
        output = (void*)((uintptr_t)output + m);
        vxyzw2 = _mm_shufflelo_epi16(vxyzw2, _MM_SHUFFLE(3, 2, 3, 2));
        *((uint32_t*)output) = _mm_cvtsi128_si32(vxyzw2);
        output = (void*)((uintptr_t)output + m);
        *((uint32_t*)output) = _mm_cvtsi128_si32(vxyzw3);
        output = (void*)((uintptr_t)output + m);
        vxyzw3 = _mm_shufflelo_epi16(vxyzw3, _MM_SHUFFLE(3, 2, 3, 2));
        *((uint32_t*)output) = _mm_cvtsi128_si32(vxyzw3);
        output = (void*)((uintptr_t)output + m);
        vxyzw3 = _mm_unpackhi_epi64(vxyzw3, vxyzw3);
        *((uint32_t*)output) = _mm_cvtsi128_si32(vxyzw3);
        output = (void*)((uintptr_t)output + m);
        vxyzw3 = _mm_shufflelo_epi16(vxyzw3, _MM_SHUFFLE(3, 2, 3, 2));
        *((uint32_t*)output) = _mm_cvtsi128_si32(vxyzw3);
        output = (void*)((uintptr_t)output + m);
        k -= 16;
      };
      /* 8-byte half-vector step: same pattern with 64-bit loads. */
      if (k >= 8) {
        const __m128i vx = _mm_loadl_epi64((const __m128i*)x);
        x += 8;
        const __m128i vy = _mm_loadl_epi64((const __m128i*)y);
        y += 8;
        const __m128i vz = _mm_loadl_epi64((const __m128i*)z);
        z += 8;
        const __m128i vw = _mm_loadl_epi64((const __m128i*)w);
        w += 8;
        const __m128i vxy = _mm_unpacklo_epi8(vx, vy);
        const __m128i vzw = _mm_unpacklo_epi8(vz, vw);
        __m128i vxyzw0 = _mm_unpacklo_epi16(vxy, vzw);
        __m128i vxyzw1 = _mm_unpackhi_epi16(vxy, vzw);
        *((uint32_t*)output) = _mm_cvtsi128_si32(vxyzw0);
        output = (void*)((uintptr_t)output + m);
        vxyzw0 = _mm_shufflelo_epi16(vxyzw0, _MM_SHUFFLE(3, 2, 3, 2));
        *((uint32_t*)output) = _mm_cvtsi128_si32(vxyzw0);
        output = (void*)((uintptr_t)output + m);
        vxyzw0 = _mm_unpackhi_epi64(vxyzw0, vxyzw0);
        *((uint32_t*)output) = _mm_cvtsi128_si32(vxyzw0);
        output = (void*)((uintptr_t)output + m);
        vxyzw0 = _mm_shufflelo_epi16(vxyzw0, _MM_SHUFFLE(3, 2, 3, 2));
        *((uint32_t*)output) = _mm_cvtsi128_si32(vxyzw0);
        output = (void*)((uintptr_t)output + m);
        *((uint32_t*)output) = _mm_cvtsi128_si32(vxyzw1);
        output = (void*)((uintptr_t)output + m);
        vxyzw1 = _mm_shufflelo_epi16(vxyzw1, _MM_SHUFFLE(3, 2, 3, 2));
        *((uint32_t*)output) = _mm_cvtsi128_si32(vxyzw1);
        output = (void*)((uintptr_t)output + m);
        vxyzw1 = _mm_unpackhi_epi64(vxyzw1, vxyzw1);
        *((uint32_t*)output) = _mm_cvtsi128_si32(vxyzw1);
        output = (void*)((uintptr_t)output + m);
        vxyzw1 = _mm_shufflelo_epi16(vxyzw1, _MM_SHUFFLE(3, 2, 3, 2));
        *((uint32_t*)output) = _mm_cvtsi128_si32(vxyzw1);
        output = (void*)((uintptr_t)output + m);
        k -= 8;
      }
      if (k != 0) {
        /*
         * Tail (1..7 leftover bytes per plane): step pointers back so an
         * 8-byte load ends exactly at the plane end, then right-shift the
         * already processed leading bytes out of each 64-bit lane.
         */
        const size_t address_decrement = 8 - k;
        x -= address_decrement;
        y -= address_decrement;
        z -= address_decrement;
        w -= address_decrement;
        const __m128i vshift = _mm_cvtsi32_si128(8 * address_decrement);
        const __m128i vx =
            _mm_srl_epi64(_mm_loadl_epi64((const __m128i*)x), vshift);
        const __m128i vy =
            _mm_srl_epi64(_mm_loadl_epi64((const __m128i*)y), vshift);
        const __m128i vz =
            _mm_srl_epi64(_mm_loadl_epi64((const __m128i*)z), vshift);
        const __m128i vw =
            _mm_srl_epi64(_mm_loadl_epi64((const __m128i*)w), vshift);
        /* Net advance of w over this pass is exactly k bytes, which the next
         * outer iteration relies on when recomputing the plane pointers. */
        w += 8;
        const __m128i vxy = _mm_unpacklo_epi8(vx, vy);
        const __m128i vzw = _mm_unpacklo_epi8(vz, vw);
        __m128i vxyzw0 = _mm_unpacklo_epi16(vxy, vzw);
        __m128i vxyzw1 = _mm_unpackhi_epi16(vxy, vzw);
        /* Store the remaining k groups, binary-decomposed over k's bits. */
        if (k & 4) {
          *((uint32_t*)output) = _mm_cvtsi128_si32(vxyzw0);
          output = (void*)((uintptr_t)output + m);
          vxyzw0 = _mm_shufflelo_epi16(vxyzw0, _MM_SHUFFLE(3, 2, 3, 2));
          *((uint32_t*)output) = _mm_cvtsi128_si32(vxyzw0);
          output = (void*)((uintptr_t)output + m);
          vxyzw0 = _mm_unpackhi_epi64(vxyzw0, vxyzw0);
          *((uint32_t*)output) = _mm_cvtsi128_si32(vxyzw0);
          output = (void*)((uintptr_t)output + m);
          vxyzw0 = _mm_shufflelo_epi16(vxyzw0, _MM_SHUFFLE(3, 2, 3, 2));
          *((uint32_t*)output) = _mm_cvtsi128_si32(vxyzw0);
          output = (void*)((uintptr_t)output + m);
          vxyzw0 = vxyzw1;
        }
        if (k & 2) {
          *((uint32_t*)output) = _mm_cvtsi128_si32(vxyzw0);
          output = (void*)((uintptr_t)output + m);
          vxyzw0 = _mm_shufflelo_epi16(vxyzw0, _MM_SHUFFLE(3, 2, 3, 2));
          *((uint32_t*)output) = _mm_cvtsi128_si32(vxyzw0);
          output = (void*)((uintptr_t)output + m);
          vxyzw0 = _mm_unpackhi_epi64(vxyzw0, vxyzw0);
        }
        if (k & 1) {
          *((uint32_t*)output) = _mm_cvtsi128_si32(vxyzw0);
          output = (void*)((uintptr_t)output + m);
        }
      }
      /* Move to the next 4 output columns; clamp so a partial final group
       * rewrites the last 4 columns instead of running past the buffer. */
      output = (void*)((uintptr_t)output + output_increment);
      if (output > last_output) {
        output = last_output;
      }
    }
  } else {
    /* Scalar fallback for n < 8: straightforward plane-to-row transpose. */
    const uint8_t* i = input;
    uint8_t* o = output;
    size_t k = n;
    do {
      size_t l = m;
      const uint8_t* ii = i++;
      do {
        *o++ = *ii;
        ii += n;
      } while (--l != 0);
    } while (--k != 0);
  }
}
| 8,669
| 40.483254
| 72
|
c
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/test/add-operator-tester.h
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdlib>
#include <functional>
#include <random>
#include <vector>
#include <pytorch_qnnpack.h>
/*
 * Test harness (builder pattern) for the QNNPACK quantized elementwise-add
 * operator (pytorch_qnnp_create/setup/run add_nc_q8). Configure via the
 * chainable setters, then call testQ8(), which compares the operator's
 * output against a float reference computed from the quantization parameters.
 * Relies on gtest ASSERT_* macros being available at the inclusion site.
 */
class AddOperatorTester {
 public:
  /* Number of channels per batch element (must be non-zero). */
  inline AddOperatorTester& channels(size_t channels) {
    assert(channels != 0);
    this->channels_ = channels;
    return *this;
  }
  inline size_t channels() const {
    return this->channels_;
  }
  /* Element stride of input A; defaults to channels() when unset (0). */
  inline AddOperatorTester& aStride(size_t aStride) {
    assert(aStride != 0);
    this->aStride_ = aStride;
    return *this;
  }
  inline size_t aStride() const {
    if (this->aStride_ == 0) {
      return this->channels_;
    } else {
      assert(this->aStride_ >= this->channels_);
      return this->aStride_;
    }
  }
  /* Element stride of input B; defaults to channels() when unset (0). */
  inline AddOperatorTester& bStride(size_t bStride) {
    assert(bStride != 0);
    this->bStride_ = bStride;
    return *this;
  }
  inline size_t bStride() const {
    if (this->bStride_ == 0) {
      return this->channels_;
    } else {
      assert(this->bStride_ >= this->channels_);
      return this->bStride_;
    }
  }
  /* Element stride of output Y; defaults to channels() when unset (0). */
  inline AddOperatorTester& yStride(size_t yStride) {
    assert(yStride != 0);
    this->yStride_ = yStride;
    return *this;
  }
  inline size_t yStride() const {
    if (this->yStride_ == 0) {
      return this->channels_;
    } else {
      assert(this->yStride_ >= this->channels_);
      return this->yStride_;
    }
  }
  inline AddOperatorTester& batchSize(size_t batchSize) {
    this->batchSize_ = batchSize;
    return *this;
  }
  inline size_t batchSize() const {
    return this->batchSize_;
  }
  /* Quantization scale for input A (must be positive and normal). */
  inline AddOperatorTester& aScale(float aScale) {
    assert(aScale > 0.0f);
    assert(std::isnormal(aScale));
    this->aScale_ = aScale;
    return *this;
  }
  inline float aScale() const {
    return this->aScale_;
  }
  inline AddOperatorTester& aZeroPoint(uint8_t aZeroPoint) {
    this->aZeroPoint_ = aZeroPoint;
    return *this;
  }
  inline uint8_t aZeroPoint() const {
    return this->aZeroPoint_;
  }
  /* Quantization scale for input B (must be positive and normal). */
  inline AddOperatorTester& bScale(float bScale) {
    assert(bScale > 0.0f);
    assert(std::isnormal(bScale));
    this->bScale_ = bScale;
    return *this;
  }
  inline float bScale() const {
    return this->bScale_;
  }
  inline AddOperatorTester& bZeroPoint(uint8_t bZeroPoint) {
    this->bZeroPoint_ = bZeroPoint;
    return *this;
  }
  inline uint8_t bZeroPoint() const {
    return this->bZeroPoint_;
  }
  /* Quantization scale for output Y (must be positive and normal). */
  inline AddOperatorTester& yScale(float yScale) {
    assert(yScale > 0.0f);
    assert(std::isnormal(yScale));
    this->yScale_ = yScale;
    return *this;
  }
  inline float yScale() const {
    return this->yScale_;
  }
  inline AddOperatorTester& yZeroPoint(uint8_t yZeroPoint) {
    this->yZeroPoint_ = yZeroPoint;
    return *this;
  }
  inline uint8_t yZeroPoint() const {
    return this->yZeroPoint_;
  }
  /* Output clamping range [qmin, qmax]. */
  inline AddOperatorTester& qmin(uint8_t qmin) {
    this->qmin_ = qmin;
    return *this;
  }
  inline uint8_t qmin() const {
    return this->qmin_;
  }
  inline AddOperatorTester& qmax(uint8_t qmax) {
    this->qmax_ = qmax;
    return *this;
  }
  inline uint8_t qmax() const {
    return this->qmax_;
  }
  /* Number of independent randomized test iterations. */
  inline AddOperatorTester& iterations(size_t iterations) {
    this->iterations_ = iterations;
    return *this;
  }
  inline size_t iterations() const {
    return this->iterations_;
  }
  /* Runs the quantized (uint8) add test with the configured parameters. */
  void testQ8() const {
    std::random_device randomDevice;
    auto rng = std::mt19937(randomDevice());
    auto u8rng = std::bind(std::uniform_int_distribution<uint8_t>(), rng);
    std::vector<uint8_t> a((batchSize() - 1) * aStride() + channels());
    std::vector<uint8_t> b((batchSize() - 1) * bStride() + channels());
    std::vector<uint8_t> y((batchSize() - 1) * yStride() + channels());
    std::vector<float> yRef(batchSize() * channels());
    for (size_t iteration = 0; iteration < iterations(); iteration++) {
      std::generate(a.begin(), a.end(), std::ref(u8rng));
      std::generate(b.begin(), b.end(), std::ref(u8rng));
      /* Poison the output buffer so untouched elements are detectable. */
      std::fill(y.begin(), y.end(), 0xA5);
      /* Guard against degenerate (constant) random inputs. */
      if (batchSize() * channels() > 3) {
        ASSERT_NE(
            *std::max_element(a.cbegin(), a.cend()),
            *std::min_element(a.cbegin(), a.cend()));
        ASSERT_NE(
            *std::max_element(b.cbegin(), b.cend()),
            *std::min_element(b.cbegin(), b.cend()));
      }
      /* Compute reference results */
      for (size_t i = 0; i < batchSize(); i++) {
        for (size_t c = 0; c < channels(); c++) {
          yRef[i * channels() + c] = float(yZeroPoint()) +
              float(int32_t(a[i * aStride() + c]) - int32_t(aZeroPoint())) *
                  (aScale() / yScale()) +
              float(int32_t(b[i * bStride() + c]) - int32_t(bZeroPoint())) *
                  (bScale() / yScale());
          yRef[i * channels() + c] =
              std::min<float>(yRef[i * channels() + c], float(qmax()));
          yRef[i * channels() + c] =
              std::max<float>(yRef[i * channels() + c], float(qmin()));
        }
      }
      /* Create, setup, run, and destroy Add operator */
      ASSERT_EQ(pytorch_qnnp_status_success, pytorch_qnnp_initialize());
      pytorch_qnnp_operator_t add_op = nullptr;
      ASSERT_EQ(
          pytorch_qnnp_status_success,
          pytorch_qnnp_create_add_nc_q8(
              channels(),
              aZeroPoint(),
              aScale(),
              bZeroPoint(),
              bScale(),
              yZeroPoint(),
              yScale(),
              qmin(),
              qmax(),
              0,
              &add_op));
      ASSERT_NE(nullptr, add_op);
      ASSERT_EQ(
          pytorch_qnnp_status_success,
          pytorch_qnnp_setup_add_nc_q8(
              add_op,
              batchSize(),
              a.data(),
              aStride(),
              b.data(),
              bStride(),
              y.data(),
              yStride()));
      ASSERT_EQ(
          pytorch_qnnp_status_success,
          pytorch_qnnp_run_operator(add_op, nullptr /* thread pool */));
      ASSERT_EQ(
          pytorch_qnnp_status_success, pytorch_qnnp_delete_operator(add_op));
      add_op = nullptr;
      /* Verify results */
      for (size_t i = 0; i < batchSize(); i++) {
        for (size_t c = 0; c < channels(); c++) {
          ASSERT_LE(uint32_t(y[i * yStride() + c]), uint32_t(qmax()));
          ASSERT_GE(uint32_t(y[i * yStride() + c]), uint32_t(qmin()));
          /* 0.6 tolerance accommodates requantization rounding error. */
          ASSERT_NEAR(
              float(int32_t(y[i * yStride() + c])),
              yRef[i * channels() + c],
              0.6f);
        }
      }
    }
  }
 private:
  size_t batchSize_{1};
  size_t channels_{1};
  size_t aStride_{0};
  size_t bStride_{0};
  size_t yStride_{0};
  float aScale_{0.75f};
  float bScale_{1.25f};
  float yScale_{0.96875f};
  uint8_t aZeroPoint_{121};
  uint8_t bZeroPoint_{127};
  uint8_t yZeroPoint_{133};
  uint8_t qmin_{0};
  uint8_t qmax_{255};
  size_t iterations_{15};
};
| 7,147
| 24.347518
| 77
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/test/avgpool-microkernel-tester.h
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <cstdlib>
#include <functional>
#include <random>
#include <vector>
#include <qnnpack/AlignedAllocator.h>
#include <qnnpack/params.h>
#include <qnnpack/requantization.h>
/*
 * Test harness (builder pattern) for QNNPACK quantized average-pooling
 * micro-kernels. Two overloads of test() cover the unipass (up) and
 * multipass (mp) kernel ABIs. Inputs are supplied through an indirection
 * buffer of row pointers, matching the kernels' indirect addressing scheme.
 * Relies on gtest ASSERT_* macros being available at the inclusion site.
 */
class AvgPoolMicrokernelTester {
 public:
  /* Number of output pixels. */
  inline AvgPoolMicrokernelTester& n(size_t n) {
    assert(n != 0);
    this->n_ = n;
    return *this;
  }
  inline size_t n() const {
    return this->n_;
  }
  /* Pooling stride, in pixels. */
  inline AvgPoolMicrokernelTester& s(size_t s) {
    assert(s != 0);
    this->s_ = s;
    return *this;
  }
  inline size_t s() const {
    return this->s_;
  }
  /* Pooling kernel height. */
  inline AvgPoolMicrokernelTester& kh(size_t kh) {
    assert(kh != 0);
    this->kh_ = kh;
    return *this;
  }
  inline size_t kh() const {
    return this->kh_;
  }
  /* Pooling kernel width. */
  inline AvgPoolMicrokernelTester& kw(size_t kw) {
    assert(kw != 0);
    this->kw_ = kw;
    return *this;
  }
  inline size_t kw() const {
    return this->kw_;
  }
  /* Total pooling elements (kernel height x width). */
  inline size_t ks() const {
    return kh() * kw();
  }
  /* Pooling-element count padded to the kernel's mr/qr tiling. */
  inline size_t packedKs() const {
    if (kc() < kr()) {
      return ks();
    } else if (ks() <= mr()) {
      return mr();
    } else {
      return (ks() - mr()) % qr() == 0
          ? ks()
          : ((ks() - mr()) / qr() + 1) * qr() + mr();
    }
  }
  /* Kernel's primary (first-pass) tile size. */
  inline AvgPoolMicrokernelTester& mr(size_t mr) {
    assert(mr != 0);
    this->mr_ = mr;
    return *this;
  }
  inline size_t mr() const {
    return this->mr_;
  }
  /* Kernel's incremental (subsequent-pass) tile size. */
  inline AvgPoolMicrokernelTester& qr(size_t qr) {
    assert(qr != 0);
    this->qr_ = qr;
    return *this;
  }
  inline size_t qr() const {
    return this->qr_;
  }
  /* Number of channels. */
  inline AvgPoolMicrokernelTester& kc(size_t kc) {
    assert(kc != 0);
    this->kc_ = kc;
    return *this;
  }
  inline size_t kc() const {
    return this->kc_;
  }
  /* Kernel's channel tile size. */
  inline AvgPoolMicrokernelTester& kr(size_t kr) {
    assert(kr != 0);
    this->kr_ = kr;
    return *this;
  }
  inline size_t kr() const {
    return this->kr_;
  }
  /* Channel count padded up to a multiple of kr (multipass accumulator size). */
  inline size_t packedN() const {
    return kc() % kr() == 0 ? kc() : (kc() / kr() + 1) * kr();
  }
  /* Input row stride in elements; defaults to kc() when unset (0). */
  inline AvgPoolMicrokernelTester& xStride(size_t xStride) {
    assert(xStride != 0);
    this->xStride_ = xStride;
    return *this;
  }
  inline size_t xStride() const {
    if (this->xStride_ == 0) {
      return kc();
    } else {
      assert(this->xStride_ >= kc());
      return this->xStride_;
    }
  }
  /* Output row stride in elements; defaults to kc() when unset (0). */
  inline AvgPoolMicrokernelTester& yStride(size_t yStride) {
    assert(yStride != 0);
    this->yStride_ = yStride;
    return *this;
  }
  inline size_t yStride() const {
    if (this->yStride_ == 0) {
      return kc();
    } else {
      assert(this->yStride_ >= kc());
      return this->yStride_;
    }
  }
  /* Input quantization scale (must be positive and normal). */
  inline AvgPoolMicrokernelTester& xScale(float xScale) {
    assert(xScale > 0.0f);
    assert(std::isnormal(xScale));
    this->xScale_ = xScale;
    return *this;
  }
  inline float xScale() const {
    return this->xScale_;
  }
  inline AvgPoolMicrokernelTester& xZeroPoint(uint8_t xZeroPoint) {
    this->xZeroPoint_ = xZeroPoint;
    return *this;
  }
  inline uint8_t xZeroPoint() const {
    return this->xZeroPoint_;
  }
  /* Output quantization scale (must be positive and normal). */
  inline AvgPoolMicrokernelTester& yScale(float yScale) {
    assert(yScale > 0.0f);
    assert(std::isnormal(yScale));
    this->yScale_ = yScale;
    return *this;
  }
  inline float yScale() const {
    return this->yScale_;
  }
  inline AvgPoolMicrokernelTester& yZeroPoint(uint8_t yZeroPoint) {
    this->yZeroPoint_ = yZeroPoint;
    return *this;
  }
  inline uint8_t yZeroPoint() const {
    return this->yZeroPoint_;
  }
  /* Output clamping range [yMin, yMax]. */
  inline AvgPoolMicrokernelTester& yMin(uint8_t yMin) {
    this->yMin_ = yMin;
    return *this;
  }
  inline uint8_t yMin() const {
    return this->yMin_;
  }
  inline AvgPoolMicrokernelTester& yMax(uint8_t yMax) {
    this->yMax_ = yMax;
    return *this;
  }
  inline uint8_t yMax() const {
    return this->yMax_;
  }
  /* Number of independent randomized test iterations. */
  inline AvgPoolMicrokernelTester& iterations(size_t iterations) {
    this->iterations_ = iterations;
    return *this;
  }
  inline size_t iterations() const {
    return this->iterations_;
  }
  /* Tests a unipass (up) average-pooling micro-kernel against scalar
   * reference quantization and a float reference with 0.5001 tolerance. */
  void test(pytorch_q8avgpool_up_ukernel_function q8avgpool) const {
    std::random_device randomDevice;
    auto rng = std::mt19937(randomDevice());
    auto u8rng = std::bind(std::uniform_int_distribution<uint8_t>(), rng);
    std::vector<const uint8_t*> indirectX(packedKs() + (n() * s() - 1) * kh());
    std::vector<uint8_t> x((indirectX.size() - 1) * xStride() + kc());
    std::vector<uint8_t> zero(kc());
    std::vector<uint8_t> y((n() - 1) * yStride() + kc());
    std::vector<uint8_t> yRef(n() * kc());
    std::vector<float> yFP(n() * kc());
    std::vector<int32_t> yAcc(n() * kc());
    for (size_t iteration = 0; iteration < iterations(); iteration++) {
      std::generate(x.begin(), x.end(), std::ref(u8rng));
      /* Poison the output buffer so untouched elements are detectable. */
      std::fill(y.begin(), y.end(), 0xA5);
      /* Build and shuffle the indirection (row-pointer) buffer. */
      for (size_t i = 0; i < indirectX.size(); i++) {
        indirectX[i] = x.data() + i * xStride();
      }
      std::shuffle(indirectX.begin(), indirectX.end(), rng);
      /* Prepare quantization parameters */
      const union pytorch_qnnp_avgpool_quantization_params quantizationParams =
          pytorch_qnnp_compute_avgpool_quantization_params(
              -int32_t(xZeroPoint()) * int32_t(ks()),
              xScale() / (yScale() * float(ks())),
              yZeroPoint(),
              yMin(),
              yMax());
      const union pytorch_qnnp_avgpool_quantization_params
          scalarQuantizationParams =
              pytorch_qnnp_compute_scalar_avgpool_quantization_params(
                  -int32_t(xZeroPoint()) * int32_t(ks()),
                  xScale() / (yScale() * float(ks())),
                  yZeroPoint(),
                  yMin(),
                  yMax());
      /* Compute reference results */
      for (size_t i = 0; i < n(); i++) {
        for (size_t k = 0; k < kc(); k++) {
          int32_t acc = scalarQuantizationParams.scalar.bias;
          for (size_t j = 0; j < ks(); j++) {
            acc += indirectX[i * s() * kh() + j][k];
          }
          yAcc[i * kc() + k] = acc;
          yRef[i * kc() + k] =
              pytorch_qnnp_avgpool_quantize(acc, scalarQuantizationParams);
          yFP[i * kc() + k] =
              float(acc) * (xScale() / (yScale() * float(ks()))) +
              float(yZeroPoint());
          yFP[i * kc() + k] = std::min<float>(yFP[i * kc() + k], float(yMax()));
          yFP[i * kc() + k] = std::max<float>(yFP[i * kc() + k], float(yMin()));
        }
      }
      /* Call optimized micro-kernel */
      q8avgpool(
          n(),
          ks(),
          kc(),
          indirectX.data(),
          zero.data(),
          y.data(),
          kh() * s() * sizeof(void*),
          (yStride() - kc()) * sizeof(uint8_t),
          &quantizationParams);
      /* Verify results */
      for (size_t i = 0; i < n(); i++) {
        for (size_t k = 0; k < kc(); k++) {
          ASSERT_LE(uint32_t(y[i * yStride() + k]), uint32_t(yMax()))
              << "at pixel " << i << ", channel " << k << ", n = " << n()
              << ", kc = " << kc();
          ASSERT_GE(uint32_t(y[i * yStride() + k]), uint32_t(yMin()))
              << "at pixel " << i << ", channel " << k << ", n = " << n()
              << ", kc = " << kc();
          ASSERT_NEAR(
              float(int32_t(y[i * yStride() + k])), yFP[i * kc() + k], 0.5001f)
              << "at pixel " << i << ", channel " << k << ", n = " << n()
              << ", ks = " << kh() << "x" << kw() << " (" << ks()
              << "), kc = " << kc() << ", acc = " << yAcc[i * kc() + k];
          ASSERT_EQ(
              uint32_t(yRef[i * kc() + k]), uint32_t(y[i * yStride() + k]))
              << "at pixel " << i << ", channel " << k << ", n = " << n()
              << ", ks = " << kh() << "x" << kw() << " (" << ks()
              << "), kc = " << kc() << ", acc = " << yAcc[i * kc() + k];
        }
      }
    }
  }
  /* Tests a multipass (mp) average-pooling micro-kernel; same references as
   * the unipass test, plus a 16-byte-aligned multipass accumulator buffer. */
  void test(pytorch_q8avgpool_mp_ukernel_function q8avgpool) const {
    std::random_device randomDevice;
    auto rng = std::mt19937(randomDevice());
    auto u8rng = std::bind(std::uniform_int_distribution<uint8_t>(), rng);
    std::vector<const uint8_t*> indirectX(packedKs() + (n() * s() - 1) * kh());
    std::vector<uint8_t> x((indirectX.size() - 1) * xStride() + kc());
    std::vector<int32_t, AlignedAllocator<int32_t, 16>> mpAcc(packedN());
    std::vector<uint8_t> zero(kc());
    std::vector<uint8_t> y((n() - 1) * yStride() + kc());
    std::vector<uint8_t> yRef(n() * kc());
    std::vector<float> yFP(n() * kc());
    std::vector<int32_t> yAcc(n() * kc());
    for (size_t iteration = 0; iteration < iterations(); iteration++) {
      std::generate(x.begin(), x.end(), std::ref(u8rng));
      /* Poison the output buffer so untouched elements are detectable. */
      std::fill(y.begin(), y.end(), 0xA5);
      /* Build and shuffle the indirection (row-pointer) buffer. */
      for (size_t i = 0; i < indirectX.size(); i++) {
        indirectX[i] = x.data() + i * xStride();
      }
      std::shuffle(indirectX.begin(), indirectX.end(), rng);
      /* Prepare quantization parameters */
      const union pytorch_qnnp_avgpool_quantization_params quantizationParams =
          pytorch_qnnp_compute_avgpool_quantization_params(
              -int32_t(xZeroPoint()) * int32_t(ks()),
              xScale() / (yScale() * float(ks())),
              yZeroPoint(),
              yMin(),
              yMax());
      const union pytorch_qnnp_avgpool_quantization_params
          scalarQuantizationParams =
              pytorch_qnnp_compute_scalar_avgpool_quantization_params(
                  -int32_t(xZeroPoint()) * int32_t(ks()),
                  xScale() / (yScale() * float(ks())),
                  yZeroPoint(),
                  yMin(),
                  yMax());
      /* Compute reference results */
      for (size_t i = 0; i < n(); i++) {
        for (size_t k = 0; k < kc(); k++) {
          int32_t acc = scalarQuantizationParams.scalar.bias;
          for (size_t j = 0; j < ks(); j++) {
            acc += indirectX[i * s() * kh() + j][k];
          }
          yAcc[i * kc() + k] = acc;
          yRef[i * kc() + k] =
              pytorch_qnnp_avgpool_quantize(acc, scalarQuantizationParams);
          yFP[i * kc() + k] =
              float(acc) * (xScale() / (yScale() * float(ks()))) +
              float(yZeroPoint());
          yFP[i * kc() + k] = std::min<float>(yFP[i * kc() + k], float(yMax()));
          yFP[i * kc() + k] = std::max<float>(yFP[i * kc() + k], float(yMin()));
        }
      }
      /* Call optimized micro-kernel */
      q8avgpool(
          n(),
          ks(),
          kc(),
          indirectX.data(),
          zero.data(),
          mpAcc.data(),
          y.data(),
          (kh() * s() - (packedKs() - qr())) * sizeof(void*),
          (yStride() - kc()) * sizeof(uint8_t),
          &quantizationParams);
      /* Verify results */
      for (size_t i = 0; i < n(); i++) {
        for (size_t k = 0; k < kc(); k++) {
          ASSERT_LE(uint32_t(y[i * yStride() + k]), uint32_t(yMax()))
              << "at pixel " << i << ", channel " << k << ", n = " << n()
              << ", kc = " << kc();
          ASSERT_GE(uint32_t(y[i * yStride() + k]), uint32_t(yMin()))
              << "at pixel " << i << ", channel " << k << ", n = " << n()
              << ", kc = " << kc();
          ASSERT_NEAR(
              float(int32_t(y[i * yStride() + k])), yFP[i * kc() + k], 0.5001f)
              << "at pixel " << i << ", channel " << k << ", n = " << n()
              << ", ks = " << kh() << "x" << kw() << " (" << ks()
              << "), kc = " << kc() << ", acc = " << yAcc[i * kc() + k];
          ASSERT_EQ(
              uint32_t(yRef[i * kc() + k]), uint32_t(y[i * yStride() + k]))
              << "at pixel " << i << ", channel " << k << ", n = " << n()
              << ", ks = " << kh() << "x" << kw() << " (" << ks()
              << "), kc = " << kc() << ", acc = " << yAcc[i * kc() + k];
        }
      }
    }
  }
 private:
  size_t n_{1};
  size_t s_{1};
  size_t kh_{1};
  size_t kw_{1};
  size_t mr_{1};
  size_t qr_{1};
  size_t kc_{1};
  size_t kr_{1};
  size_t xStride_{0};
  size_t yStride_{0};
  float xScale_{1.25f};
  float yScale_{0.75f};
  uint8_t xZeroPoint_{121};
  uint8_t yZeroPoint_{133};
  uint8_t yMin_{0};
  uint8_t yMax_{255};
  size_t iterations_{15};
};
| 12,512
| 28.1
| 80
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/test/channel-shuffle-operator-tester.h
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdlib>
#include <functional>
#include <random>
#include <vector>
#include <pytorch_qnnpack.h>
/*
 * Test harness (builder pattern) for the QNNPACK channel-shuffle operator
 * (pytorch_qnnp_create/setup/run channel_shuffle_nc_x8). testX8() verifies
 * that input element [g * groupChannels + c] lands at output element
 * [c * groups + g] for every batch row.
 * Relies on gtest ASSERT_* macros being available at the inclusion site.
 */
class ChannelShuffleOperatorTester {
 public:
  /* Number of channel groups (must be non-zero). */
  inline ChannelShuffleOperatorTester& groups(size_t groups) {
    assert(groups != 0);
    this->groups_ = groups;
    return *this;
  }
  inline size_t groups() const {
    return this->groups_;
  }
  /* Channels per group (must be non-zero). */
  inline ChannelShuffleOperatorTester& groupChannels(size_t groupChannels) {
    assert(groupChannels != 0);
    this->groupChannels_ = groupChannels;
    return *this;
  }
  inline size_t groupChannels() const {
    return this->groupChannels_;
  }
  /* Total channels = groups x channels-per-group. */
  inline size_t channels() const {
    return groups() * groupChannels();
  }
  /* Input row stride in elements; defaults to channels() when unset (0). */
  inline ChannelShuffleOperatorTester& inputStride(size_t inputStride) {
    assert(inputStride != 0);
    this->inputStride_ = inputStride;
    return *this;
  }
  inline size_t inputStride() const {
    if (this->inputStride_ == 0) {
      return channels();
    } else {
      assert(this->inputStride_ >= channels());
      return this->inputStride_;
    }
  }
  /* Output row stride in elements; defaults to channels() when unset (0). */
  inline ChannelShuffleOperatorTester& outputStride(size_t outputStride) {
    assert(outputStride != 0);
    this->outputStride_ = outputStride;
    return *this;
  }
  inline size_t outputStride() const {
    if (this->outputStride_ == 0) {
      return channels();
    } else {
      assert(this->outputStride_ >= channels());
      return this->outputStride_;
    }
  }
  inline ChannelShuffleOperatorTester& batchSize(size_t batchSize) {
    this->batchSize_ = batchSize;
    return *this;
  }
  inline size_t batchSize() const {
    return this->batchSize_;
  }
  /* Number of independent randomized test iterations. */
  inline ChannelShuffleOperatorTester& iterations(size_t iterations) {
    this->iterations_ = iterations;
    return *this;
  }
  inline size_t iterations() const {
    return this->iterations_;
  }
  /* Runs the 8-bit channel-shuffle test with the configured parameters. */
  void testX8() const {
    std::random_device randomDevice;
    auto rng = std::mt19937(randomDevice());
    auto u8rng = std::bind(std::uniform_int_distribution<uint8_t>(), rng);
    std::vector<uint8_t> input((batchSize() - 1) * inputStride() + channels());
    std::vector<uint8_t> output(
        (batchSize() - 1) * outputStride() + channels());
    for (size_t iteration = 0; iteration < iterations(); iteration++) {
      std::generate(input.begin(), input.end(), std::ref(u8rng));
      /* Poison the output buffer so untouched elements are detectable. */
      std::fill(output.begin(), output.end(), 0xA5);
      /* Create, setup, run, and destroy Channel Shuffle operator */
      ASSERT_EQ(pytorch_qnnp_status_success, pytorch_qnnp_initialize());
      pytorch_qnnp_operator_t channel_shuffle_op = nullptr;
      ASSERT_EQ(
          pytorch_qnnp_status_success,
          pytorch_qnnp_create_channel_shuffle_nc_x8(
              groups(), groupChannels(), 0, &channel_shuffle_op));
      ASSERT_NE(nullptr, channel_shuffle_op);
      ASSERT_EQ(
          pytorch_qnnp_status_success,
          pytorch_qnnp_setup_channel_shuffle_nc_x8(
              channel_shuffle_op,
              batchSize(),
              input.data(),
              inputStride(),
              output.data(),
              outputStride()));
      ASSERT_EQ(
          pytorch_qnnp_status_success,
          pytorch_qnnp_run_operator(
              channel_shuffle_op, nullptr /* thread pool */));
      ASSERT_EQ(
          pytorch_qnnp_status_success,
          pytorch_qnnp_delete_operator(channel_shuffle_op));
      channel_shuffle_op = nullptr;
      /* Verify results */
      for (size_t i = 0; i < batchSize(); i++) {
        for (size_t g = 0; g < groups(); g++) {
          for (size_t c = 0; c < groupChannels(); c++) {
            /* Shuffle maps (group g, channel c) -> (channel c, group g). */
            ASSERT_EQ(
                uint32_t(input[i * inputStride() + g * groupChannels() + c]),
                uint32_t(output[i * outputStride() + c * groups() + g]));
          }
        }
      }
    }
  }
 private:
  size_t groups_{1};
  size_t groupChannels_{1};
  size_t batchSize_{1};
  size_t inputStride_{0};
  size_t outputStride_{0};
  size_t iterations_{15};
};
| 4,253
| 25.924051
| 79
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/test/clamp-microkernel-tester.h
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdlib>
#include <functional>
#include <random>
#include <vector>
#include <qnnpack/params.h>
#include <qnnpack/requantization.h>
// Harness for u8 clamp micro-kernels: feeds random bytes through the
// kernel under test and checks every output byte against a scalar
// min/max reference.
class ClampMicrokernelTester {
 public:
  // Element count processed per call (must be nonzero).
  inline ClampMicrokernelTester& n(size_t n) {
    assert(n != 0);
    this->n_ = n;
    return *this;
  }

  inline size_t n() const {
    return this->n_;
  }

  // When true, the kernel reads and writes the same buffer.
  inline ClampMicrokernelTester& inplace(bool inplace) {
    this->inplace_ = inplace;
    return *this;
  }

  inline bool inplace() const {
    return this->inplace_;
  }

  inline ClampMicrokernelTester& qmin(uint8_t qmin) {
    this->qmin_ = qmin;
    return *this;
  }

  inline uint8_t qmin() const {
    return this->qmin_;
  }

  inline ClampMicrokernelTester& qmax(uint8_t qmax) {
    this->qmax_ = qmax;
    return *this;
  }

  inline uint8_t qmax() const {
    return this->qmax_;
  }

  inline ClampMicrokernelTester& iterations(size_t iterations) {
    this->iterations_ = iterations;
    return *this;
  }

  inline size_t iterations() const {
    return this->iterations_;
  }

  // Runs the supplied micro-kernel and verifies it matches the scalar
  // clamp reference for every element.
  void test(pytorch_u8clamp_ukernel_function u8clamp) const {
    std::random_device randomDevice;
    auto rng = std::mt19937(randomDevice());
    auto u8rng = std::bind(std::uniform_int_distribution<uint8_t>(), rng);

    std::vector<uint8_t> x(n());
    std::vector<uint8_t> y(n());
    std::vector<uint8_t> yRef(n());
    for (size_t iter = 0; iter < iterations(); iter++) {
      std::generate(x.begin(), x.end(), std::ref(u8rng));
      if (inplace()) {
        std::generate(y.begin(), y.end(), std::ref(u8rng));
      } else {
        std::fill(y.begin(), y.end(), 0xA5);
      }
      const uint8_t* xData = inplace() ? y.data() : x.data();

      /* Prepare clamping parameters */
      const union pytorch_qnnp_u8_clamping_params clampingParams =
          pytorch_qnnp_compute_u8_clamping_params(qmin(), qmax());

      /* Scalar reference: cap at qmax first, then raise to qmin */
      for (size_t i = 0; i < n(); i++) {
        const uint8_t capped = std::min(xData[i], qmax());
        yRef[i] = std::max(capped, qmin());
      }

      /* Call optimized micro-kernel */
      u8clamp(n(), xData, y.data(), &clampingParams);

      /* Verify results */
      for (size_t i = 0; i < n(); i++) {
        ASSERT_LE(uint32_t(y[i]), uint32_t(qmax()))
            << "at position " << i << ", n = " << n();
        ASSERT_GE(uint32_t(y[i]), uint32_t(qmin()))
            << "at position " << i << ", n = " << n();
        ASSERT_EQ(uint32_t(yRef[i]), uint32_t(y[i]))
            << "at position " << i << ", n = " << n() << ", qmin = " << qmin()
            << ", qmax = " << qmax();
      }
    }
  }

 private:
  size_t n_{1};
  bool inplace_{false};
  uint8_t qmin_{0};
  uint8_t qmax_{255};
  size_t iterations_{15};
};
| 3,011
| 24.310924
| 78
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/test/clamp-operator-tester.h
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdlib>
#include <functional>
#include <random>
#include <vector>
#include <pytorch_qnnpack.h>
// Test harness for the QNNPACK u8 clamp operator
// (pytorch_qnnp_create/setup_clamp_nc_u8). Configured through a fluent
// interface; testU8() runs the operator on random data and compares the
// result against a scalar reference.
class ClampOperatorTester {
 public:
  // Number of channels per batch element (must be nonzero).
  inline ClampOperatorTester& channels(size_t channels) {
    assert(channels != 0);
    this->channels_ = channels;
    return *this;
  }

  inline size_t channels() const {
    return this->channels_;
  }

  inline ClampOperatorTester& inputStride(size_t inputStride) {
    assert(inputStride != 0);
    this->inputStride_ = inputStride;
    return *this;
  }

  // Effective input stride; defaults to channels() while unset (zero).
  inline size_t inputStride() const {
    if (this->inputStride_ == 0) {
      return this->channels_;
    } else {
      assert(this->inputStride_ >= this->channels_);
      return this->inputStride_;
    }
  }

  inline ClampOperatorTester& outputStride(size_t outputStride) {
    assert(outputStride != 0);
    this->outputStride_ = outputStride;
    return *this;
  }

  // Effective output stride; defaults to channels() while unset (zero).
  inline size_t outputStride() const {
    if (this->outputStride_ == 0) {
      return this->channels_;
    } else {
      assert(this->outputStride_ >= this->channels_);
      return this->outputStride_;
    }
  }

  inline ClampOperatorTester& batchSize(size_t batchSize) {
    this->batchSize_ = batchSize;
    return *this;
  }

  inline size_t batchSize() const {
    return this->batchSize_;
  }

  // Lower clamp bound.
  inline ClampOperatorTester& qmin(uint8_t qmin) {
    this->qmin_ = qmin;
    return *this;
  }

  inline uint8_t qmin() const {
    return this->qmin_;
  }

  // Upper clamp bound.
  inline ClampOperatorTester& qmax(uint8_t qmax) {
    this->qmax_ = qmax;
    return *this;
  }

  inline uint8_t qmax() const {
    return this->qmax_;
  }

  inline ClampOperatorTester& iterations(size_t iterations) {
    this->iterations_ = iterations;
    return *this;
  }

  inline size_t iterations() const {
    return this->iterations_;
  }

  // Runs the clamp operator and verifies every output element is within
  // [qmin, qmax] and equal to the scalar reference.
  void testU8() const {
    std::random_device randomDevice;
    auto rng = std::mt19937(randomDevice());
    auto u8rng = std::bind(std::uniform_int_distribution<uint8_t>(), rng);

    std::vector<uint8_t> input((batchSize() - 1) * inputStride() + channels());
    std::vector<uint8_t> output(
        (batchSize() - 1) * outputStride() + channels());
    std::vector<uint8_t> outputRef(batchSize() * channels());
    for (size_t iteration = 0; iteration < iterations(); iteration++) {
      std::generate(input.begin(), input.end(), std::ref(u8rng));
      std::fill(output.begin(), output.end(), 0xA5);

      /* Compute reference results */
      for (size_t i = 0; i < batchSize(); i++) {
        for (size_t c = 0; c < channels(); c++) {
          const uint8_t x = input[i * inputStride() + c];
          const uint8_t y = std::min(std::max(x, qmin()), qmax());
          outputRef[i * channels() + c] = y;
        }
      }

      /* Create, setup, run, and destroy Clamp operator */
      ASSERT_EQ(pytorch_qnnp_status_success, pytorch_qnnp_initialize());
      pytorch_qnnp_operator_t clampOp = nullptr;

      ASSERT_EQ(
          pytorch_qnnp_status_success,
          pytorch_qnnp_create_clamp_nc_u8(
              channels(), qmin(), qmax(), 0, &clampOp));
      ASSERT_NE(nullptr, clampOp);

      ASSERT_EQ(
          pytorch_qnnp_status_success,
          pytorch_qnnp_setup_clamp_nc_u8(
              clampOp,
              batchSize(),
              input.data(),
              inputStride(),
              output.data(),
              outputStride()));

      ASSERT_EQ(
          pytorch_qnnp_status_success,
          pytorch_qnnp_run_operator(clampOp, nullptr /* thread pool */));

      ASSERT_EQ(
          pytorch_qnnp_status_success, pytorch_qnnp_delete_operator(clampOp));
      clampOp = nullptr;

      /* Verify results */
      for (size_t i = 0; i < batchSize(); i++) {
        for (size_t c = 0; c < channels(); c++) {
          // The operator writes with outputStride(); indexing with
          // channels() here would inspect unrelated bytes (e.g. the 0xA5
          // filler) whenever outputStride() != channels(), making the
          // range checks vacuous or spuriously failing.
          ASSERT_LE(uint32_t(output[i * outputStride() + c]), uint32_t(qmax()))
              << "at position " << i << ", batch size = " << batchSize()
              << ", channels = " << channels();
          ASSERT_GE(uint32_t(output[i * outputStride() + c]), uint32_t(qmin()))
              << "at position " << i << ", batch size = " << batchSize()
              << ", channels = " << channels();
          ASSERT_EQ(
              uint32_t(outputRef[i * channels() + c]),
              uint32_t(output[i * outputStride() + c]))
              << "at position " << i << ", batch size = " << batchSize()
              << ", channels = " << channels() << ", qmin = " << qmin()
              << ", qmax = " << qmax();
        }
      }
    }
  }

 private:
  size_t batchSize_{1};
  size_t channels_{1};
  size_t inputStride_{0};
  size_t outputStride_{0};
  uint8_t qmin_{0};
  uint8_t qmax_{255};
  size_t iterations_{15};
};
| 4,964
| 26.893258
| 79
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/test/deconvolution-operator-tester.h
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <cstdlib>
#include <functional>
#include <memory>
#include <random>
#include <vector>
#include <pytorch_qnnpack.h>
#include <qnnpack_func.h>
#include "test_utils.h"
using namespace qnnpack::testing;
// Test harness for the QNNPACK quantized (q8) 2-D deconvolution
// (transposed convolution) operator. All geometry and quantization
// parameters are set through the fluent interface; testQ8() computes a
// scalar int32 reference, runs the operator (either via the one-shot
// Static setup/run path or the prepacked Runtime path), and compares
// the requantized outputs within a tolerance of 0.9 ULP-of-scale.
class DeconvolutionOperatorTester {
 public:
  // Symmetric padding applied to both spatial dimensions.
  inline DeconvolutionOperatorTester& padding(uint32_t padding) {
    this->paddingHeight_ = padding;
    this->paddingWidth_ = padding;
    return *this;
  }

  inline DeconvolutionOperatorTester& padding(
      uint32_t paddingHeight,
      uint32_t paddingWidth) {
    this->paddingHeight_ = paddingHeight;
    this->paddingWidth_ = paddingWidth;
    return *this;
  }

  inline DeconvolutionOperatorTester& paddingHeight(uint32_t paddingHeight) {
    this->paddingHeight_ = paddingHeight;
    return *this;
  }

  inline uint32_t paddingHeight() const {
    return this->paddingHeight_;
  }

  inline DeconvolutionOperatorTester& paddingWidth(uint32_t paddingWidth) {
    this->paddingWidth_ = paddingWidth;
    return *this;
  }

  inline uint32_t paddingWidth() const {
    return this->paddingWidth_;
  }

  // Output-size adjustment (a.k.a. output_padding in transposed conv).
  inline DeconvolutionOperatorTester& adjustmentHeight(
      uint32_t adjustmentHeight) {
    this->adjustmentHeight_ = adjustmentHeight;
    return *this;
  }

  inline uint32_t adjustmentHeight() const {
    return this->adjustmentHeight_;
  }

  inline DeconvolutionOperatorTester& adjustmentWidth(
      uint32_t adjustmentWidth) {
    this->adjustmentWidth_ = adjustmentWidth;
    return *this;
  }

  inline uint32_t adjustmentWidth() const {
    return this->adjustmentWidth_;
  }

  inline DeconvolutionOperatorTester& inputSize(
      uint32_t inputHeight,
      uint32_t inputWidth) {
    assert(inputHeight >= 1);
    assert(inputWidth >= 1);
    this->inputHeight_ = inputHeight;
    this->inputWidth_ = inputWidth;
    return *this;
  }

  inline DeconvolutionOperatorTester& inputHeight(uint32_t inputHeight) {
    assert(inputHeight >= 1);
    this->inputHeight_ = inputHeight;
    return *this;
  }

  inline uint32_t inputHeight() const {
    return this->inputHeight_;
  }

  inline DeconvolutionOperatorTester& inputWidth(uint32_t inputWidth) {
    assert(inputWidth >= 1);
    this->inputWidth_ = inputWidth;
    return *this;
  }

  inline uint32_t inputWidth() const {
    return this->inputWidth_;
  }

  inline DeconvolutionOperatorTester& groups(uint32_t groups) {
    assert(groups >= 1);
    this->groups_ = groups;
    return *this;
  }

  inline uint32_t groups() const {
    return this->groups_;
  }

  inline DeconvolutionOperatorTester& groupInputChannels(
      size_t groupInputChannels) {
    assert(groupInputChannels >= 1);
    this->groupInputChannels_ = groupInputChannels;
    return *this;
  }

  inline size_t groupInputChannels() const {
    return this->groupInputChannels_;
  }

  // When true, per-output-channel kernel zero points and requantization
  // scales are randomized instead of shared across channels.
  inline DeconvolutionOperatorTester& per_channel(bool per_channel) {
    this->per_channel_ = per_channel;
    return *this;
  }

  inline bool per_channel() const {
    return this->per_channel_;
  }

  inline DeconvolutionOperatorTester& groupOutputChannels(
      size_t groupOutputChannels) {
    assert(groupOutputChannels >= 1);
    this->groupOutputChannels_ = groupOutputChannels;
    return *this;
  }

  inline size_t groupOutputChannels() const {
    return this->groupOutputChannels_;
  }

  inline DeconvolutionOperatorTester& batchSize(size_t batchSize) {
    this->batchSize_ = batchSize;
    return *this;
  }

  inline DeconvolutionOperatorTester& kernelSize(uint32_t kernelSize) {
    assert(kernelSize >= 1);
    this->kernelHeight_ = kernelSize;
    this->kernelWidth_ = kernelSize;
    return *this;
  }

  inline DeconvolutionOperatorTester& kernelSize(
      uint32_t kernelHeight,
      uint32_t kernelWidth) {
    assert(kernelHeight >= 1);
    assert(kernelWidth >= 1);
    this->kernelHeight_ = kernelHeight;
    this->kernelWidth_ = kernelWidth;
    return *this;
  }

  inline DeconvolutionOperatorTester& kernelHeight(uint32_t kernelHeight) {
    assert(kernelHeight >= 1);
    this->kernelHeight_ = kernelHeight;
    return *this;
  }

  inline uint32_t kernelHeight() const {
    return this->kernelHeight_;
  }

  inline DeconvolutionOperatorTester& kernelWidth(uint32_t kernelWidth) {
    assert(kernelWidth >= 1);
    this->kernelWidth_ = kernelWidth;
    return *this;
  }

  inline uint32_t kernelWidth() const {
    return this->kernelWidth_;
  }

  inline DeconvolutionOperatorTester& dilation(uint32_t dilation) {
    assert(dilation >= 1);
    this->dilationHeight_ = dilation;
    this->dilationWidth_ = dilation;
    return *this;
  }

  inline DeconvolutionOperatorTester& dilation(
      uint32_t dilationHeight,
      uint32_t dilationWidth) {
    assert(dilationHeight >= 1);
    assert(dilationWidth >= 1);
    this->dilationHeight_ = dilationHeight;
    this->dilationWidth_ = dilationWidth;
    return *this;
  }

  inline DeconvolutionOperatorTester& dilationHeight(uint32_t dilationHeight) {
    assert(dilationHeight >= 1);
    this->dilationHeight_ = dilationHeight;
    return *this;
  }

  inline uint32_t dilationHeight() const {
    return this->dilationHeight_;
  }

  inline DeconvolutionOperatorTester& dilationWidth(uint32_t dilationWidth) {
    assert(dilationWidth >= 1);
    this->dilationWidth_ = dilationWidth;
    return *this;
  }

  inline uint32_t dilationWidth() const {
    return this->dilationWidth_;
  }

  inline DeconvolutionOperatorTester& stride(uint32_t stride) {
    assert(stride >= 1);
    this->strideHeight_ = stride;
    this->strideWidth_ = stride;
    return *this;
  }

  inline DeconvolutionOperatorTester& stride(
      uint32_t strideHeight,
      uint32_t strideWidth) {
    assert(strideHeight >= 1);
    assert(strideWidth >= 1);
    this->strideHeight_ = strideHeight;
    this->strideWidth_ = strideWidth;
    return *this;
  }

  inline DeconvolutionOperatorTester& strideHeight(uint32_t strideHeight) {
    assert(strideHeight >= 1);
    this->strideHeight_ = strideHeight;
    return *this;
  }

  inline uint32_t strideHeight() const {
    return this->strideHeight_;
  }

  inline DeconvolutionOperatorTester& strideWidth(uint32_t strideWidth) {
    assert(strideWidth >= 1);
    this->strideWidth_ = strideWidth;
    return *this;
  }

  inline uint32_t strideWidth() const {
    return this->strideWidth_;
  }

  inline DeconvolutionOperatorTester& inputPixelStride(
      size_t inputPixelStride) {
    assert(inputPixelStride >= 1);
    this->inputPixelStride_ = inputPixelStride;
    return *this;
  }

  // Effective input pixel stride; defaults to the total input channel
  // count while unset (zero).
  inline size_t inputPixelStride() const {
    if (this->inputPixelStride_ == 0) {
      return groupInputChannels() * groups();
    } else {
      assert(this->inputPixelStride_ >= groupInputChannels() * groups());
      return this->inputPixelStride_;
    }
  }

  inline DeconvolutionOperatorTester& outputPixelStride(
      size_t outputPixelStride) {
    assert(outputPixelStride >= 1);
    this->outputPixelStride_ = outputPixelStride;
    return *this;
  }

  // Effective output pixel stride; defaults to the total output channel
  // count while unset (zero).
  inline size_t outputPixelStride() const {
    if (this->outputPixelStride_ == 0) {
      return groupOutputChannels() * groups();
    } else {
      assert(this->outputPixelStride_ >= groupOutputChannels() * groups());
      return this->outputPixelStride_;
    }
  }

  // Spatial extent of the kernel once dilation gaps are included.
  inline uint32_t dilatedKernelHeight() const {
    return (kernelHeight() - 1) * dilationHeight() + 1;
  }

  inline uint32_t dilatedKernelWidth() const {
    return (kernelWidth() - 1) * dilationWidth() + 1;
  }

  // Transposed-convolution output size:
  // stride * (in - 1) + adjustment + dilated_kernel - 2 * padding.
  inline size_t outputHeight() const {
    return strideHeight() * (inputHeight() - 1) + adjustmentHeight() +
        dilatedKernelHeight() - paddingHeight() * 2;
  }

  inline size_t outputWidth() const {
    return strideWidth() * (inputWidth() - 1) + adjustmentWidth() +
        dilatedKernelWidth() - paddingWidth() * 2;
  }

  inline DeconvolutionOperatorTester& qmin(uint8_t qmin) {
    this->qmin_ = qmin;
    return *this;
  }

  inline uint8_t qmin() const {
    return this->qmin_;
  }

  inline DeconvolutionOperatorTester& qmax(uint8_t qmax) {
    this->qmax_ = qmax;
    return *this;
  }

  inline uint8_t qmax() const {
    return this->qmax_;
  }

  inline DeconvolutionOperatorTester& iterations(size_t iterations) {
    this->iterations_ = iterations;
    return *this;
  }

  inline size_t iterations() const {
    return this->iterations_;
  }

  // Builds random input/kernel/bias, computes an exact int32 reference
  // accumulation, runs the QNNPACK operator in the requested mode, and
  // verifies the requantized output. `mode` selects the Static
  // (setup/run) path or the Runtime (prepacked-weights) path.
  void testQ8(const Mode mode = Mode::Static) const {
    std::random_device randomDevice;
    auto rng = std::mt19937(randomDevice());
    auto s32rng =
        std::bind(std::uniform_int_distribution<int32_t>(-10000, 10000), rng);
    auto u8rng = std::bind(std::uniform_int_distribution<uint8_t>(), rng);

    // Input carries 8 extra bytes; inputPtr below starts at offset 8.
    std::vector<uint8_t> input(
        batchSize() *
            ((inputHeight() * inputWidth() - 1) * inputPixelStride() +
             groups() * groupInputChannels()) +
        8);
    std::vector<uint8_t> kernel(
        groups() * groupOutputChannels() * kernelHeight() * kernelWidth() *
        groupInputChannels());
    std::vector<int32_t> bias(groups() * groupOutputChannels());
    std::vector<uint8_t> output(
        batchSize() *
        ((outputHeight() * outputWidth() - 1) * outputPixelStride() +
         groups() * groupOutputChannels()));
    std::vector<int32_t> accumulators(
        batchSize() * outputHeight() * outputWidth() * groups() *
        groupOutputChannels());

    const uint8_t* inputPtr = input.data() + 8;
    const uint8_t inputZeroPoint = 127;
    // Make num zero points multiple of 8.
    // This is the least common denominator for SSE/ARM kernels we have.
    size_t num_zero_points_padded =
      groups() * groupOutputChannels() + 8;
    std::vector<uint8_t> kernelZeroPoints(num_zero_points_padded, 127);

    for (size_t iteration = 0; iteration < iterations(); iteration++) {
      std::generate(input.begin(), input.end(), std::ref(u8rng));
      std::generate(kernel.begin(), kernel.end(), std::ref(u8rng));
      std::generate(bias.begin(), bias.end(), std::ref(s32rng));
      if (per_channel()) {
        std::generate(kernelZeroPoints.begin(), kernelZeroPoints.end(), std::ref(u8rng));
      }
      std::fill(output.begin(), output.end(), 0xA5);
      std::fill(accumulators.begin(), accumulators.end(), 0);

      // Seed every accumulator with the bias of its output channel.
      for (size_t i = 0; i < batchSize(); i++) {
        for (size_t oy = 0; oy < outputHeight(); oy++) {
          for (size_t ox = 0; ox < outputWidth(); ox++) {
            for (size_t g = 0; g < groups(); g++) {
              for (size_t oc = 0; oc < groupOutputChannels(); oc++) {
                accumulators
                    [(((i * outputHeight() + oy) * outputWidth() + ox) *
                          groups() +
                      g) *
                         groupOutputChannels() +
                     oc] = bias[g * groupOutputChannels() + oc];
              }
            }
          }
        }
      }
      // Scalar reference for the transposed convolution: each output
      // position maps back to the input positions that would have
      // produced it under a forward convolution. The size_t subtraction
      // (oy + padding - ky*dilation) wraps for "negative" values; the
      // subsequent `iy < inputHeight()` / exact-multiple checks reject
      // those out-of-range taps.
      for (size_t i = 0; i < batchSize(); i++) {
        for (size_t oy = 0; oy < outputHeight(); oy++) {
          for (size_t ox = 0; ox < outputWidth(); ox++) {
            for (size_t ky = 0; ky < kernelHeight(); ky++) {
              const size_t y = oy + paddingHeight() - ky * dilationHeight();
              const size_t iy = y / strideHeight();
              if (iy * strideHeight() == y && iy < inputHeight()) {
                for (size_t kx = 0; kx < kernelWidth(); kx++) {
                  const size_t x = ox + paddingWidth() - kx * dilationWidth();
                  const size_t ix = x / strideWidth();
                  if (ix * strideWidth() == x && ix < inputWidth()) {
                    for (size_t g = 0; g < groups(); g++) {
                      for (size_t oc = 0; oc < groupOutputChannels(); oc++) {
                        for (size_t ic = 0; ic < groupInputChannels(); ic++) {
                          accumulators
                              [(((i * outputHeight() + oy) * outputWidth() +
                                 ox) *
                                    groups() +
                                g) *
                                   groupOutputChannels() +
                               oc] +=
                              (int32_t(inputPtr
                                           [((i * inputHeight() + iy) *
                                                 inputWidth() +
                                             ix) *
                                                inputPixelStride() +
                                            g * groupInputChannels() + ic]) -
                               int32_t(inputZeroPoint)) *
                              (int32_t(kernel
                                           [(((g * groupInputChannels() + ic) *
                                                  kernelHeight() +
                                              ky) *
                                                 kernelWidth() +
                                             kx) *
                                                groupOutputChannels() +
                                            oc]) -
                               int32_t(kernelZeroPoints[g* groupOutputChannels() + oc]));
                        }
                      }
                    }
                  }
                }
              }
            }
          }
        }
      }
      // Create dummy min/max for empty inputs.
      // These are only used to compute scale and zero point,
      // and real callers will just pull those values from the model.
      const int32_t accumulatorsMin = accumulators.empty()
          ? 0
          : *std::min_element(accumulators.cbegin(), accumulators.cend());
      const int32_t accumulatorsMax = accumulators.empty()
          ? 900
          : *std::max_element(accumulators.cbegin(), accumulators.cend());

      // Pick an output scale/zero-point that maps the observed
      // accumulator range onto [0, 255], centered around 127.5.
      const double outputScale =
          double(uint32_t(accumulatorsMax - accumulatorsMin)) / 255.0;
      const uint8_t outputZeroPoint = uint8_t(std::max(
          std::min(
              lrint(
                  127.5 -
                  0.5 * double(accumulatorsMin + accumulatorsMax) /
                      outputScale),
              long(std::numeric_limits<uint8_t>::max())),
          long(std::numeric_limits<uint8_t>::min())));

      ASSERT_EQ(pytorch_qnnp_status_success, pytorch_qnnp_initialize());
      std::vector<float> requantization_scales(num_zero_points_padded, 1.0 * 1.0 / outputScale);
      auto f32rng =
          std::bind(std::uniform_real_distribution<float>(1, 5), rng);
      if (per_channel()) {
        auto scale_generator = [&]() -> float {return (f32rng()/outputScale);};
        std::generate(
            requantization_scales.begin(),
            requantization_scales.end(),
            std::ref(scale_generator));
      }
      pytorch_qnnp_operator_t deconvolution = nullptr;
      ASSERT_EQ(
          pytorch_qnnp_status_success,
          pytorch_qnnp_create_deconvolution2d_nhwc_q8(
              paddingHeight(),
              paddingWidth(),
              adjustmentHeight(),
              adjustmentWidth(),
              kernelHeight(),
              kernelWidth(),
              strideHeight(),
              strideWidth(),
              dilationHeight(),
              dilationWidth(),
              groups(),
              groupInputChannels(),
              groupOutputChannels(),
              inputZeroPoint,
              kernelZeroPoints.data(),
              kernel.data(),
              bias.data(),
              outputZeroPoint,
              qmin(),
              qmax(),
              0,
              requantization_scales.data(),
              &deconvolution));

      switch (mode) {
        // Static: classic create/setup/run/delete operator lifecycle.
        case Mode::Static: {
          ASSERT_EQ(
              pytorch_qnnp_status_success,
              pytorch_qnnp_setup_deconvolution2d_nhwc_q8(
                  deconvolution,
                  batchSize(),
                  inputHeight(),
                  inputWidth(),
                  inputPtr,
                  inputPixelStride(),
                  output.data(),
                  outputPixelStride(),
                  nullptr /* thread pool */));

          ASSERT_EQ(
              pytorch_qnnp_status_success,
              pytorch_qnnp_run_operator(deconvolution, nullptr /* thread pool */));

          ASSERT_EQ(
              pytorch_qnnp_status_success,
              pytorch_qnnp_delete_operator(deconvolution));
          deconvolution = nullptr;
        } break;

        // Runtime: prepack the weights once, then invoke qnnpackDeConv
        // with the packed weights directly.
        case Mode::Runtime:
        {
          auto packW = std::unique_ptr<qnnpack::PrePackConvWeights>(
              new qnnpack::PrePackConvWeights(
                  deconvolution,
                  kernelZeroPoints.data(),
                  kernel.data(),
                  bias.data()));
          ASSERT_EQ(
              pytorch_qnnp_status_success,
              qnnpack::qnnpackDeConv(
                  deconvolution,
                  packW->getPackedWeights(),
                  batchSize(),
                  inputHeight(),
                  inputWidth(),
                  inputZeroPoint,
                  inputPtr,
                  kernelZeroPoints.data(),
                  requantization_scales.data(),
                  outputZeroPoint,
                  qmin(),
                  qmax(),
                  output.data(),
                  nullptr));
        }
        break;

        default:
          ASSERT_TRUE(false);
      }

      // Compare the operator output against the clamped, scaled
      // reference accumulators (tolerance 0.9 to allow for rounding
      // differences in the requantization path).
      for (size_t i = 0; i < batchSize(); i++) {
        for (size_t y = 0; y < outputHeight(); y++) {
          for (size_t x = 0; x < outputWidth(); x++) {
            for (size_t g = 0; g < groups(); g++) {
              for (size_t c = 0; c < groupOutputChannels(); c++) {
                const double scaledAccumulator =
                    accumulators
                        [(((i * outputHeight() + y) * outputWidth() + x) *
                              groups() +
                          g) *
                             groupOutputChannels() +
                         c] *
                    requantization_scales[g * groupOutputChannels() + c];
                const double clampedAccumulator = std::max(
                    std::min(
                        scaledAccumulator,
                        double(qmax()) - double(outputZeroPoint)),
                    double(qmin()) - double(outputZeroPoint));
                ASSERT_NEAR(
                    clampedAccumulator,
                    (int32_t(
                         output
                             [((i * outputHeight() + y) * outputWidth() + x) *
                                  outputPixelStride() +
                              g * groupOutputChannels() + c]) -
                     outputZeroPoint),
                    0.9)
                    << "(x, y) = (" << x << ", " << y << "), group = " << g
                    << ", channel = " << c;
                ASSERT_LE(
                    double(
                        int32_t(output
                                    [((i * outputHeight() + y) * outputWidth() +
                                      x) *
                                         outputPixelStride() +
                                     g * groupOutputChannels() + c]) -
                        outputZeroPoint),
                    double(qmax()) - double(outputZeroPoint))
                    << "(x, y) = (" << x << ", " << y << "), group = " << g
                    << ", channel = " << c;
                ASSERT_GE(
                    double(
                        int32_t(output
                                    [((i * outputHeight() + y) * outputWidth() +
                                      x) *
                                         outputPixelStride() +
                                     g * groupOutputChannels() + c]) -
                        outputZeroPoint),
                    double(qmin()) - double(outputZeroPoint))
                    << "(x, y) = (" << x << ", " << y << "), group = " << g
                    << ", channel = " << c;
              }
            }
          }
        }
      }
    }
  }

 private:
  uint32_t paddingHeight_{0};
  uint32_t paddingWidth_{0};
  size_t inputHeight_{1};
  size_t inputWidth_{1};
  uint32_t groups_{1};
  size_t groupInputChannels_{1};
  size_t inputPixelStride_{0};
  size_t groupOutputChannels_{1};
  size_t outputPixelStride_{0};
  size_t batchSize_{1};
  uint32_t kernelHeight_{1};
  uint32_t kernelWidth_{1};
  uint32_t adjustmentHeight_{0};
  uint32_t adjustmentWidth_{0};
  uint32_t dilationHeight_{1};
  uint32_t dilationWidth_{1};
  uint32_t strideHeight_{1};
  uint32_t strideWidth_{1};
  uint8_t qmin_{0};
  uint8_t qmax_{255};
  size_t iterations_{1};
  bool per_channel_{false};
};
| 21,019
| 31.042683
| 96
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/test/dwconv-microkernel-tester.h
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <cstdlib>
#include <functional>
#include <random>
#include <vector>
#include <qnnpack/AlignedAllocator.h>
#include <qnnpack/pack.h>
#include <qnnpack/params.h>
#include <qnnpack/requantization.h>
class DWConvMicrokernelTester {
public:
inline DWConvMicrokernelTester& width(uint32_t width) {
assert(width >= 1);
this->width_ = width;
return *this;
}
inline uint32_t width() const {
return this->width_;
}
inline DWConvMicrokernelTester& subsampling(uint32_t subsampling) {
assert(subsampling >= 1);
this->subsampling_ = subsampling;
return *this;
}
inline uint32_t subsampling() const {
return this->subsampling_;
}
inline DWConvMicrokernelTester& channels(uint32_t channels) {
assert(channels >= 1);
this->channels_ = channels;
return *this;
}
inline uint32_t channels() const {
return this->channels_;
}
inline DWConvMicrokernelTester& cr(uint32_t cr) {
assert(cr != 0);
assert((cr & (cr - 1)) == 0);
this->cr_ = cr;
return *this;
}
inline uint32_t cr() const {
return this->cr_;
}
inline uint32_t packedChannels() const {
return (channels() + (cr() - 1)) & -cr();
}
inline DWConvMicrokernelTester& kernelHeight(uint32_t kernelHeight) {
assert(kernelHeight != 0);
this->kernelHeight_ = kernelHeight;
return *this;
}
inline uint32_t kernelHeight() const {
return this->kernelHeight_;
}
inline DWConvMicrokernelTester& kernelWidth(uint32_t kernelWidth) {
assert(kernelWidth != 0);
this->kernelWidth_ = kernelWidth;
return *this;
}
inline uint32_t kernelWidth() const {
return this->kernelWidth_;
}
inline uint32_t kernelSize() const {
return kernelHeight() * kernelWidth();
}
inline DWConvMicrokernelTester& inputStride(uint32_t inputStride) {
assert(inputStride != 0);
this->inputStride_ = inputStride;
return *this;
}
inline uint32_t inputStride() const {
if (this->inputStride_ == 0) {
return channels();
} else {
assert(this->inputStride_ >= channels());
return this->inputStride_;
}
}
inline DWConvMicrokernelTester& outputStride(uint32_t outputStride) {
assert(outputStride != 0);
this->outputStride_ = outputStride;
return *this;
}
inline uint32_t outputStride() const {
if (this->outputStride_ == 0) {
return channels();
} else {
assert(this->outputStride_ >= channels());
return this->outputStride_;
}
}
inline DWConvMicrokernelTester& inputZeroPoint(uint8_t inputZeroPoint) {
this->inputZeroPoint_ = inputZeroPoint;
return *this;
}
inline uint8_t inputZeroPoint() const {
return this->inputZeroPoint_;
}
inline DWConvMicrokernelTester& kernelZeroPoint(uint8_t kernelZeroPoint) {
this->kernelZeroPoint_ = kernelZeroPoint;
return *this;
}
inline uint8_t kernelZeroPoint() const {
return this->kernelZeroPoint_;
}
inline DWConvMicrokernelTester& qmin(uint8_t qmin) {
this->qmin_ = qmin;
return *this;
}
inline uint8_t qmin() const {
return this->qmin_;
}
inline DWConvMicrokernelTester& qmax(uint8_t qmax) {
this->qmax_ = qmax;
return *this;
}
inline uint8_t qmax() const {
return this->qmax_;
}
inline DWConvMicrokernelTester& iterations(size_t iterations) {
this->iterations_ = iterations;
return *this;
}
inline size_t iterations() const {
return this->iterations_;
}
void test(
pytorch_q8dwconv2d_up_ukernel_function q8dwconv,
bool per_channel = false) const {
std::random_device randomDevice;
auto rng = std::mt19937(randomDevice());
auto s32rng =
std::bind(std::uniform_int_distribution<int32_t>(-10000, 10000), rng);
auto u8rng = std::bind(std::uniform_int_distribution<uint8_t>(), rng);
std::vector<uint8_t> input(
(kernelSize() + (width() * subsampling() - 1) * kernelHeight() - 1) *
inputStride() +
channels() + 8);
std::vector<uint8_t> kernel(channels() * kernelSize());
std::vector<uint8_t, AlignedAllocator<uint8_t, 32>> packedWeights(
(kernelSize() + sizeof(int32_t) / sizeof(uint8_t)) * packedChannels());
std::vector<int32_t> bias(packedChannels());
std::vector<int32_t> accumulators(width() * channels());
std::vector<uint8_t> output((width() - 1) * outputStride() + channels());
std::vector<const uint8_t*> indirectInput(
kernelSize() + (width() * subsampling() - 1) * kernelHeight());
const uint8_t* inputPtr = input.data() + 8;
for (size_t iteration = 0; iteration < iterations(); iteration++) {
std::generate(input.begin(), input.end(), std::ref(u8rng));
std::generate(kernel.begin(), kernel.end(), std::ref(u8rng));
std::generate(bias.begin(), bias.end(), std::ref(s32rng));
std::fill(accumulators.begin(), accumulators.end(), 0);
ASSERT_NE(
*std::max_element(input.cbegin(), input.cend()),
*std::min_element(input.cbegin(), input.cend()));
ASSERT_NE(
*std::max_element(kernel.cbegin(), kernel.cend()),
*std::min_element(kernel.cbegin(), kernel.cend()));
std::fill(packedWeights.begin(), packedWeights.end(), 0xA5);
size_t num_zero_points_padded = channels() + 8;
std::vector<uint8_t> kernel_zero_points(
num_zero_points_padded, 0);
if (per_channel) {
std::generate(
kernel_zero_points.begin(),
kernel_zero_points.begin() + channels(),
std::ref(u8rng));
}
pytorch_pack_q8dw_w(
kernelHeight(),
kernelWidth(),
channels(),
cr(),
#if !PYTORCH_QNNPACK_RUNTIME_QUANTIZATION
inputZeroPoint(),
kernel_zero_points.data(),
#endif
kernel.data(),
bias.data(),
packedWeights.data());
for (size_t i = 0;
i < kernelSize() + (width() * subsampling() - 1) * kernelHeight();
i++) {
indirectInput[i] = inputPtr + i * inputStride();
}
std::shuffle(indirectInput.begin(), indirectInput.end(), rng);
for (size_t x = 0; x < width(); x++) {
for (size_t c = 0; c < channels(); c++) {
int32_t acc = bias[c];
for (size_t kx = 0; kx < kernelWidth(); kx++) {
for (size_t ky = 0; ky < kernelHeight(); ky++) {
acc += (int32_t(indirectInput
[(x * subsampling() + kx) * kernelHeight() +
ky][c]) -
int32_t(inputZeroPoint())) *
(int32_t(
kernel[(c * kernelHeight() + ky) * kernelWidth() + kx]) -
int32_t(kernel_zero_points[c]));
}
}
accumulators[x * channels() + c] = acc;
}
}
const int32_t accumulatorsMin =
*std::min_element(accumulators.cbegin(), accumulators.cend());
const int32_t accumulatorsMax =
*std::max_element(accumulators.cbegin(), accumulators.cend());
const uint32_t accumulatorsRange =
uint32_t(accumulatorsMax) - uint32_t(accumulatorsMin);
ASSERT_NE(0, accumulatorsRange);
const double outputScale = accumulatorsRange >= 256
? double(accumulatorsRange) / 255.0
: 1.00001;
const uint8_t outputZeroPoint = uint8_t(std::max(
std::min(
lrint(
127.5 -
0.5 * double(accumulatorsMin + accumulatorsMax) /
outputScale),
long(std::numeric_limits<uint8_t>::max())),
long(std::numeric_limits<uint8_t>::min())));
std::vector<float> requantization_scales(num_zero_points_padded, 1.0f / float(outputScale));
if (per_channel) {
auto f32rng =
std::bind(std::uniform_real_distribution<float>(1, 5), rng);
auto scale_generator = [&]() -> float {return (f32rng()/outputScale);};
std::generate(
requantization_scales.begin(),
requantization_scales.end(),
std::ref(scale_generator));
}
const union pytorch_qnnp_conv_quantization_params quantizationParams =
pytorch_qnnp_compute_conv_quantization_params(
inputZeroPoint(),
kernel_zero_points.data(),
requantization_scales.data(),
outputZeroPoint,
qmin(),
qmax());
const union pytorch_qnnp_fp32_requantization_params
scalarRequantizationParams =
pytorch_qnnp_compute_scalar_fp32_requantization_params(
requantization_scales.data(), outputZeroPoint, qmin(), qmax());
q8dwconv(
channels(),
width(),
indirectInput.data(),
packedWeights.data(),
output.data(),
kernelHeight() * subsampling() * sizeof(void*),
(outputStride() - channels()) * sizeof(uint8_t),
&quantizationParams);
for (size_t x = 0; x < width(); x++) {
for (size_t c = 0; c < channels(); c++) {
#if defined(__arm__) || defined(_M_ARM)
const uint8_t referenceOutput = pytorch_qnnp_fp32_requantize_magic(
accumulators[x * channels() + c], scalarRequantizationParams, c);
#else
const uint8_t referenceOutput = pytorch_qnnp_fp32_requantize(
accumulators[x * channels() + c], scalarRequantizationParams, c);
#endif
const double scaledAccumulator =
accumulators[x * channels() + c] * requantization_scales[c] +
double(outputZeroPoint);
const double clampedAccumulator = std::max(
std::min(scaledAccumulator, double(qmax())), double(qmin()));
ASSERT_NEAR(
clampedAccumulator, double(output[x * outputStride() + c]), 0.6)
<< "x = " << x << ", channel = " << c;
ASSERT_EQ(
uint32_t(referenceOutput),
uint32_t(output[x * outputStride() + c]))
<< "x = " << x << ", channel = " << c;
}
}
}
}
void test(
pytorch_q8dwconv2d_mp_ukernel_function q8dwconv,
bool per_channel = false) const {
ASSERT_EQ(25, kernelSize())
<< "only 5x5 microkernel is currently supported";
std::random_device randomDevice;
auto rng = std::mt19937(randomDevice());
auto s32rng =
std::bind(std::uniform_int_distribution<int32_t>(-10000, 10000), rng);
auto u8rng = std::bind(std::uniform_int_distribution<uint8_t>(), rng);
std::vector<uint8_t> input(
(kernelSize() + (width() * subsampling() - 1) * kernelHeight() - 1) *
inputStride() +
channels() + 8);
std::vector<uint8_t> kernel(channels() * kernelSize());
std::vector<uint8_t, AlignedAllocator<uint8_t, 32>> packedWeights(
(kernelSize() + sizeof(int32_t) / sizeof(uint8_t)) * packedChannels());
std::vector<int32_t> bias(packedChannels());
std::vector<int32_t> accumulators(width() * channels());
std::vector<int32_t> mpAcc(width() * packedChannels());
std::vector<uint8_t> output((width() - 1) * outputStride() + channels());
std::vector<const uint8_t*> indirectInput(
kernelSize() + (width() * subsampling() - 1) * kernelHeight());
const uint8_t* inputPtr = input.data() + 8;
for (size_t iteration = 0; iteration < iterations(); iteration++) {
std::generate(input.begin(), input.end(), std::ref(u8rng));
std::generate(kernel.begin(), kernel.end(), std::ref(u8rng));
std::generate(bias.begin(), bias.end(), std::ref(s32rng));
std::fill(accumulators.begin(), accumulators.end(), 0);
std::fill(mpAcc.begin(), mpAcc.end(), 0xA5A55A5A);
ASSERT_NE(
*std::max_element(input.cbegin(), input.cend()),
*std::min_element(input.cbegin(), input.cend()));
ASSERT_NE(
*std::max_element(kernel.cbegin(), kernel.cend()),
*std::min_element(kernel.cbegin(), kernel.cend()));
std::fill(packedWeights.begin(), packedWeights.end(), 0xA5);
size_t num_zero_points_padded = channels() + 8;
std::vector<uint8_t> kernel_zero_points(num_zero_points_padded, this->kernelZeroPoint_);
if (per_channel) {
std::generate(
kernel_zero_points.begin(),
kernel_zero_points.end(),
std::ref(u8rng));
}
ASSERT_EQ(25, kernelSize())
<< "only 5x5 microkernel is currently supported";
pytorch_pack_q8dw_2d_w_dilation(
kernelHeight(),
kernelWidth(),
channels(),
cr(),
0,
kernelHeight(),
0,
2,
kernel.data(),
bias.data(),
packedWeights.data(),
true);
pytorch_pack_q8dw_2d_w_dilation(
kernelHeight(),
kernelWidth(),
channels(),
cr(),
0,
kernelHeight(),
2,
4,
kernel.data(),
bias.data(),
packedWeights.data() +
(10 + sizeof(int32_t) / sizeof(uint8_t)) * packedChannels(),
false);
pytorch_pack_q8dw_2d_w_dilation(
kernelHeight(),
kernelWidth(),
channels(),
cr(),
0,
kernelHeight(),
4,
5,
kernel.data(),
bias.data(),
packedWeights.data() +
(20 + sizeof(int32_t) / sizeof(uint8_t)) * packedChannels(),
false);
for (size_t i = 0;
i < kernelSize() + (width() * subsampling() - 1) * kernelHeight();
i++) {
indirectInput[i] = inputPtr + i * inputStride();
}
std::shuffle(indirectInput.begin(), indirectInput.end(), rng);
for (size_t x = 0; x < width(); x++) {
for (size_t c = 0; c < channels(); c++) {
int32_t acc = bias[c];
for (size_t kx = 0; kx < kernelWidth(); kx++) {
for (size_t ky = 0; ky < kernelHeight(); ky++) {
acc += (int32_t(indirectInput
[(x * subsampling() + kx) * kernelHeight() +
ky][c]) -
int32_t(inputZeroPoint())) *
(int32_t(
kernel[(c * kernelHeight() + ky) * kernelWidth() + kx]) -
int32_t(kernel_zero_points[c]));
}
}
accumulators[x * channels() + c] = acc;
}
}
const int32_t accumulatorsMin =
*std::min_element(accumulators.cbegin(), accumulators.cend());
const int32_t accumulatorsMax =
*std::max_element(accumulators.cbegin(), accumulators.cend());
const uint32_t accumulatorsRange =
uint32_t(accumulatorsMax) - uint32_t(accumulatorsMin);
ASSERT_NE(0, accumulatorsRange);
const double outputScale = accumulatorsRange >= 256
? double(accumulatorsRange) / 255.0
: 1.00001;
const uint8_t outputZeroPoint = uint8_t(std::max(
std::min(
lrint(
127.5 -
0.5 * double(accumulatorsMin + accumulatorsMax) /
outputScale),
long(std::numeric_limits<uint8_t>::max())),
long(std::numeric_limits<uint8_t>::min())));
std::vector<float> requantization_scales(num_zero_points_padded, 1.0f / float(outputScale));
if (per_channel) {
auto f32rng =
std::bind(std::uniform_real_distribution<float>(1, 5), rng);
auto scale_generator = [&]() -> float {return (f32rng()/outputScale);};
std::generate(
requantization_scales.begin(),
requantization_scales.end(),
std::ref(scale_generator));
}
const union pytorch_qnnp_conv_quantization_params quantizationParams =
pytorch_qnnp_compute_conv_quantization_params(
inputZeroPoint(),
kernel_zero_points.data(),
requantization_scales.data(),
outputZeroPoint,
qmin(),
qmax());
const union pytorch_qnnp_fp32_requantization_params
scalarRequantizationParams =
pytorch_qnnp_compute_scalar_fp32_requantization_params(
requantization_scales.data(), outputZeroPoint, qmin(), qmax());
q8dwconv(
channels(),
width(),
indirectInput.data(),
packedWeights.data(),
mpAcc.data(),
output.data(),
kernelHeight() * subsampling() * sizeof(void*),
(outputStride() - channels()) * sizeof(uint8_t),
&quantizationParams);
for (size_t x = 0; x < width(); x++) {
for (size_t c = 0; c < channels(); c++) {
#if defined(__arm__) || defined(_M_ARM)
const uint8_t referenceOutput = pytorch_qnnp_fp32_requantize_magic(
accumulators[x * channels() + c], scalarRequantizationParams, c);
#else
const uint8_t referenceOutput = pytorch_qnnp_fp32_requantize(
accumulators[x * channels() + c], scalarRequantizationParams, c);
#endif
const double scaledAccumulator =
accumulators[x * channels() + c] * requantization_scales[c] +
double(outputZeroPoint);
const double clampedAccumulator = std::max(
std::min(scaledAccumulator, double(qmax())), double(qmin()));
ASSERT_NEAR(
clampedAccumulator, double(output[x * outputStride() + c]), 0.6)
<< "x = " << x << ", channel = " << c;
ASSERT_EQ(
uint32_t(referenceOutput),
uint32_t(output[x * outputStride() + c]))
<< "x = " << x << ", channel = " << c;
}
}
}
}
private:
uint32_t channels_{1};
uint32_t cr_{1};
uint32_t width_{1};
uint32_t subsampling_{1};
uint32_t kernelHeight_{1};
uint32_t kernelWidth_{1};
uint32_t inputStride_{0};
uint32_t outputStride_{0};
uint8_t inputZeroPoint_{127};
uint8_t kernelZeroPoint_{127};
uint8_t qmin_{0};
uint8_t qmax_{255};
size_t iterations_{3};
};
| 18,463
| 32.509982
| 98
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/test/fully-connected-operator-tester.h
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdlib>
#include <functional>
#include <random>
#include <vector>
#include <memory>
#include <pytorch_qnnpack.h>
#include <qnnpack_func.h>
#include <qnnpack/AlignedAllocator.h>
class FullyConnectedOperatorTester {
public:
inline FullyConnectedOperatorTester& inputChannels(size_t inputChannels) {
assert(inputChannels >= 1);
this->inputChannels_ = inputChannels;
return *this;
}
inline size_t inputChannels() const {
return this->inputChannels_;
}
inline FullyConnectedOperatorTester& outputChannels(size_t outputChannels) {
assert(outputChannels >= 1);
this->outputChannels_ = outputChannels;
return *this;
}
inline size_t outputChannels() const {
return this->outputChannels_;
}
inline FullyConnectedOperatorTester& batchSize(size_t batchSize) {
this->batchSize_ = batchSize;
return *this;
}
inline size_t batchSize() const {
return this->batchSize_;
}
inline FullyConnectedOperatorTester& inputStride(size_t inputStride) {
assert(inputStride >= 1);
this->inputStride_ = inputStride;
return *this;
}
inline size_t inputStride() const {
if (this->inputStride_ == 0) {
return inputChannels();
} else {
assert(this->inputStride_ >= inputChannels());
return this->inputStride_;
}
}
inline FullyConnectedOperatorTester& outputStride(size_t outputStride) {
assert(outputStride >= 1);
this->outputStride_ = outputStride;
return *this;
}
inline size_t outputStride() const {
if (this->outputStride_ == 0) {
return outputChannels();
} else {
assert(this->outputStride_ >= outputChannels());
return this->outputStride_;
}
}
inline FullyConnectedOperatorTester& per_channel(bool per_channel) {
this->per_channel_ = per_channel;
return *this;
}
inline bool per_channel() const {
return this->per_channel_;
}
inline FullyConnectedOperatorTester& qmin(uint8_t qmin) {
this->qmin_ = qmin;
return *this;
}
inline uint8_t qmin() const {
return this->qmin_;
}
inline FullyConnectedOperatorTester& qmax(uint8_t qmax) {
this->qmax_ = qmax;
return *this;
}
inline uint8_t qmax() const {
return this->qmax_;
}
inline FullyConnectedOperatorTester& iterations(size_t iterations) {
this->iterations_ = iterations;
return *this;
}
inline size_t iterations() const {
return this->iterations_;
}
enum class Mode {
Static,
Dynamic,
Runtime,
};
void testQ8(const Mode mode) const {
std::random_device randomDevice;
auto rng = std::mt19937(randomDevice());
auto s32rng =
std::bind(std::uniform_int_distribution<int32_t>(-10000, 10000), rng);
auto u8rng = std::bind(std::uniform_int_distribution<uint8_t>(), rng);
auto f32rng =
std::bind(std::uniform_real_distribution<float>(1, 5), rng);
std::vector<uint8_t> input(
(batchSize() - 1) * inputStride() + inputChannels() + 8);
std::vector<uint8_t> kernel(outputChannels() * inputChannels());
std::vector<int32_t> bias(outputChannels());
std::vector<uint8_t> output(
(batchSize() - 1) * outputStride() + outputChannels());
std::vector<float> output_dynamic(output.size());
std::vector<int32_t> accumulators(batchSize() * outputChannels());
const uint8_t* const inputPtr = input.data() + 8;
const uint8_t inputZeroPoint = 127;
// Make number of output channels multiple of 8.
// This is the least common denominator for SSE/ARM kernels we have.
size_t num_zero_points_padded = outputChannels() + 8;
std::vector<uint8_t> kernelZeroPoints(num_zero_points_padded, 127);
for (size_t iteration = 0; iteration < iterations(); iteration++) {
std::generate(input.begin(), input.end(), std::ref(u8rng));
std::generate(kernel.begin(), kernel.end(), std::ref(u8rng));
std::generate(bias.begin(), bias.end(), std::ref(s32rng));
if (per_channel()) {
std::generate(kernelZeroPoints.begin(), kernelZeroPoints.end(), std::ref(u8rng));
}
std::fill(output.begin(), output.end(), 0xA5);
std::fill(output_dynamic.begin(), output_dynamic.end(), 0.0f);
std::fill(accumulators.begin(), accumulators.end(), 0);
for (size_t i = 0; i < batchSize(); i++) {
for (size_t oc = 0; oc < outputChannels(); oc++) {
accumulators[i * outputChannels() + oc] = bias[oc];
}
}
for (size_t i = 0; i < batchSize(); i++) {
for (size_t oc = 0; oc < outputChannels(); oc++) {
for (size_t ic = 0; ic < inputChannels(); ic++) {
accumulators[i * outputChannels() + oc] +=
(int32_t(inputPtr[i * inputStride() + ic]) -
int32_t(inputZeroPoint)) *
(int32_t(kernel[oc * inputChannels() + ic]) -
int32_t(kernelZeroPoints[oc]));
}
}
}
// Create dummy min/max for empty inputs.
// These are only used to compute scale and zero point,
// and real callers will just pull those values from the model.
const int32_t accumulatorsMin = accumulators.empty()
? 0
: *std::min_element(accumulators.cbegin(), accumulators.cend());
const int32_t accumulatorsMax = accumulators.empty()
? 900
: *std::max_element(accumulators.cbegin(), accumulators.cend());
const double outputScale =
double(uint32_t(accumulatorsMax - accumulatorsMin)) / 255.0;
const uint8_t outputZeroPoint = uint8_t(std::max(
std::min(
lrint(
127.5 -
0.5 * double(accumulatorsMin + accumulatorsMax) /
outputScale),
long(std::numeric_limits<uint8_t>::max())),
long(std::numeric_limits<uint8_t>::min())));
ASSERT_EQ(pytorch_qnnp_status_success, pytorch_qnnp_initialize());
// 1 bcz input_scale and kernel_scale are both 1.
std::vector<float>
requantization_scales(num_zero_points_padded, 1.0 * 1.0 / outputScale);
if (per_channel()) {
auto scale_generator = [&]() -> float {return (f32rng()/outputScale);};
std::generate(
requantization_scales.begin(),
requantization_scales.end(),
std::ref(scale_generator));
}
switch(mode) {
case Mode::Static:
{
pytorch_qnnp_operator_t convolution = nullptr;
ASSERT_EQ(
pytorch_qnnp_status_success,
pytorch_qnnp_create_fully_connected_nc_q8(
inputChannels(),
outputChannels(),
inputZeroPoint,
kernelZeroPoints.data(),
kernel.data(),
bias.data(),
outputZeroPoint,
qmin(),
qmax(),
0,
requantization_scales.data(),
&convolution));
ASSERT_EQ(
pytorch_qnnp_status_success,
pytorch_qnnp_setup_fully_connected_nc_q8(
convolution,
batchSize(),
inputPtr,
inputStride(),
output.data(),
outputStride()));
ASSERT_EQ(
pytorch_qnnp_status_success,
pytorch_qnnp_run_operator(convolution, nullptr /* thread pool */));
ASSERT_EQ(
pytorch_qnnp_status_success,
pytorch_qnnp_delete_operator(convolution));
convolution = nullptr;
}
break;
case Mode::Dynamic:
{
auto packW = std::unique_ptr<qnnpack::PackBMatrix>(
new qnnpack::PackBMatrix(
inputChannels(),
outputChannels(),
kernelZeroPoints.data(),
requantization_scales.data(),
kernel.data(),
nullptr));
// Attention! Bias size must be a multiple of 8.
constexpr size_t kBiasSizeMultiple = 8u;
std::vector<float, AlignedAllocator<float, 32>> bias_float(
(bias.size() + (kBiasSizeMultiple - 1)) & -kBiasSizeMultiple);
std::copy(bias.cbegin(), bias.cend(), bias_float.begin());
const pytorch_qnnp_status runStatus = qnnpack::qnnpackLinearDynamic(
batchSize() /* batch_size */,
inputChannels() /* input_channels */,
outputChannels() /* output_channels */,
inputZeroPoint,
kernelZeroPoints.data(),
requantization_scales.data(), /* Dequantization scale */
inputPtr,
inputChannels() /* input_stride */,
packW->getPackedWeights(),
bias_float.data(),
output_dynamic.data(),
outputStride() /* output_stride */,
nullptr /* threadpool */);
ASSERT_EQ(pytorch_qnnp_status_success, runStatus);
}
break;
case Mode::Runtime:
{
auto packW = std::unique_ptr<qnnpack::PackBMatrix>(
new qnnpack::PackBMatrix(
inputChannels(),
outputChannels(),
kernelZeroPoints.data(),
requantization_scales.data(),
kernel.data(),
bias.data()));
const pytorch_qnnp_status runStatus = qnnpack::qnnpackLinear(
batchSize() /* batch_size */,
inputChannels() /* input_channels */,
outputChannels() /* output_channels */,
inputZeroPoint,
kernelZeroPoints.data(),
requantization_scales.data(),
outputZeroPoint,
qmin(),
qmax(),
inputPtr,
inputChannels() /* input_stride */,
packW->getPackedWeights(),
output.data(),
outputStride() /* output_stride */,
nullptr /* threadpool */);
ASSERT_EQ(pytorch_qnnp_status_success, runStatus);
}
break;
default:
// Undefined!
ASSERT_TRUE(false);
}
switch (mode) {
case Mode::Static:
case Mode::Runtime:
{
for (size_t i = 0; i < batchSize(); i++) {
for (size_t c = 0; c < outputChannels(); c++) {
const double scaledAccumulator =
accumulators[i * outputChannels() + c] *
requantization_scales[c];
const double clampedAccumulator = std::max(
std::min(
scaledAccumulator, double(qmax()) - double(outputZeroPoint)),
double(qmin()) - double(outputZeroPoint));
ASSERT_NEAR(
clampedAccumulator,
(int32_t(output[i * outputStride() + c]) - outputZeroPoint),
0.9)
<< "batch index = " << i << ", channel = " << c;
}
}
}
break;
case Mode::Dynamic:
{
// Bias is added post scaling, as float.
for (size_t i = 0; i < batchSize(); i++) {
for (size_t oc = 0; oc < outputChannels(); oc++) {
accumulators[i * outputChannels() + oc] -= bias[oc];
}
}
for (size_t i = 0; i < batchSize(); i++) {
for (size_t c = 0; c < outputChannels(); c++) {
const float ref = ((float)accumulators[i * outputChannels() + c] *
requantization_scales[c]) +
float(bias[c]);
ASSERT_NEAR(
output_dynamic[i * outputChannels() + c],
ref,
std::abs(ref) * 1.0e-4)
<< "at " << i << ", " << c << ": reference = " << ref
<< ", optimized = "
<< output_dynamic[i * outputChannels() + c];
}
}
}
break;
default:
// Undefined!
ASSERT_TRUE(false);
}
}
}
private:
size_t inputChannels_{1};
size_t inputStride_{0};
size_t outputChannels_{1};
size_t outputStride_{0};
size_t batchSize_{1};
uint8_t qmin_{0};
uint8_t qmax_{255};
size_t iterations_{1};
bool per_channel_{false};
};
| 12,678
| 31.344388
| 89
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/test/fully-connected-sparse-operator-tester.h
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdlib>
#include <functional>
#include <random>
#include <vector>
#include <memory>
#include <pack_block_sparse.h>
#include <pytorch_qnnpack.h>
#include <qnnpack_func.h>
#include <qnnpack/AlignedAllocator.h>
#define MAYBE_UNUSED __attribute__((unused))
namespace {
void fillBlockSparseWeights(
uint8_t* b,
size_t N,
size_t K,
size_t row_block_size,
size_t col_block_size,
float sparsity,
const uint8_t* zero_points) {
std::random_device randomDevice;
auto rng = std::mt19937(randomDevice());
std::bernoulli_distribution dist{sparsity};
for (uint32_t n = 0; n < N ; n += row_block_size) {
for (uint32_t k = 0; k < K; k += col_block_size) {
if (dist(rng)) {
for (uint32_t nb = 0; (nb < row_block_size) && (n + nb < N); ++nb) {
for (uint32_t kb = 0; (kb < col_block_size) && (k + kb < K); ++kb) {
*(b + (n + nb) * K + k + kb) = zero_points[n + nb];
}
}
}
}
}
}
// Temp Debug utils that will be removed later
MAYBE_UNUSED void printMatrix(const char* name, const uint8_t* a, const size_t M, const size_t N) {
std::cout << "Matrix START:" << name << "...\n";
for (uint32_t m = 0; m < M ; ++m) {
for (uint32_t n = 0; n < N; n++) {
std::cout << (const uint32_t)(*(a + m * N + n)) << ", ";
}
std::cout << std::endl;
}
std::cout << "Matrix END...\n\n";
}
MAYBE_UNUSED void printMatrix(const char* name, const float* a, const size_t M, const size_t N) {
std::cout << "Matrix START:" << name << "...\n";
for (uint32_t m = 0; m < M ; ++m) {
for (uint32_t n = 0; n < N; n++) {
std::cout << (*(a + m * N + n)) << ", ";
}
std::cout << std::endl;
}
std::cout << "Matrix END...\n\n";
}
}
class FullyConnectedSparseOperatorTester {
public:
inline FullyConnectedSparseOperatorTester& inputChannels(size_t inputChannels) {
assert(inputChannels >= 1);
this->inputChannels_ = inputChannels;
return *this;
}
inline size_t inputChannels() const {
return this->inputChannels_;
}
inline FullyConnectedSparseOperatorTester& outputChannels(size_t outputChannels) {
assert(outputChannels >= 1);
this->outputChannels_ = outputChannels;
return *this;
}
inline size_t outputChannels() const {
return this->outputChannels_;
}
inline FullyConnectedSparseOperatorTester& batchSize(size_t batchSize) {
this->batchSize_ = batchSize;
return *this;
}
inline size_t batchSize() const {
return this->batchSize_;
}
inline FullyConnectedSparseOperatorTester& inputStride(size_t inputStride) {
assert(inputStride >= 1);
this->inputStride_ = inputStride;
return *this;
}
inline size_t inputStride() const {
if (this->inputStride_ == 0) {
return inputChannels();
} else {
assert(this->inputStride_ >= inputChannels());
return this->inputStride_;
}
}
inline FullyConnectedSparseOperatorTester& outputStride(size_t outputStride) {
assert(outputStride >= 1);
this->outputStride_ = outputStride;
return *this;
}
inline size_t outputStride() const {
if (this->outputStride_ == 0) {
return outputChannels();
} else {
assert(this->outputStride_ >= outputChannels());
return this->outputStride_;
}
}
inline FullyConnectedSparseOperatorTester& qmin(uint8_t qmin) {
this->qmin_ = qmin;
return *this;
}
inline uint8_t qmin() const {
return this->qmin_;
}
inline FullyConnectedSparseOperatorTester& qmax(uint8_t qmax) {
this->qmax_ = qmax;
return *this;
}
inline uint8_t qmax() const {
return this->qmax_;
}
inline FullyConnectedSparseOperatorTester& iterations(size_t iterations) {
this->iterations_ = iterations;
return *this;
}
inline size_t iterations() const {
return this->iterations_;
}
inline FullyConnectedSparseOperatorTester& rowBlockSize(size_t block_size) {
this->rowBlockSize_ = block_size;
return *this;
}
inline FullyConnectedSparseOperatorTester& colBlockSize(size_t block_size) {
this->colBlockSize_ = block_size;
return *this;
}
inline FullyConnectedSparseOperatorTester& sparsity(float s) {
this->sparsity_ = s;
return *this;
}
inline size_t rowBlockSize() const {
return this->rowBlockSize_;
}
inline size_t colBlockSize() const {
return this->colBlockSize_;
}
inline float sparsity() const {
return this->sparsity_;
}
enum class Mode {
Dynamic,
Runtime,
};
void testQ8(const Mode mode) const {
std::random_device randomDevice;
auto rng = std::mt19937(randomDevice());
auto s32rng =
std::bind(std::uniform_int_distribution<int32_t>(-10000, 10000), rng);
auto u8rng = std::bind(std::uniform_int_distribution<uint8_t>(), rng);
auto f32rng =
std::bind(std::uniform_real_distribution<float>(1, 5), rng);
std::vector<uint8_t> input(
(batchSize() - 1) * inputStride() + inputChannels() + 8);
std::vector<uint8_t> kernel(outputChannels() * inputChannels());
std::vector<int32_t> bias(outputChannels());
std::vector<uint8_t> output(
(batchSize() - 1) * outputStride() + outputChannels());
std::vector<float> output_dynamic(output.size());
std::vector<int32_t> accumulators(batchSize() * outputChannels());
std::vector<float> accumulators_float(batchSize() * outputChannels());
const uint8_t* const inputPtr = input.data();
const uint8_t inputZeroPoint = 127;
// Make number of output channels multiple of 8.
// This is the least common denominator for SSE/ARM kernels we have.
size_t num_zero_points_padded = outputChannels() + 8;
std::vector<uint8_t> kernelZeroPoints(num_zero_points_padded, 127);
for (size_t iteration = 0; iteration < iterations(); iteration++) {
std::generate(input.begin(), input.end(), std::ref(u8rng));
std::generate(bias.begin(), bias.end(), std::ref(s32rng));
std::generate(kernelZeroPoints.begin(), kernelZeroPoints.end(), std::ref(u8rng));
uint8_t max_elem, min_elem;
do {
std::generate(kernel.begin(), kernel.end(), std::ref(u8rng));
fillBlockSparseWeights(
kernel.data(),
outputChannels(),
inputChannels(),
rowBlockSize(),
colBlockSize(),
sparsity(),
kernelZeroPoints.data());
max_elem = *std::max_element(kernel.cbegin(), kernel.cend());
min_elem = *std::min_element(kernel.cbegin(), kernel.cend());
} while (max_elem == min_elem);
std::unique_ptr<qnnpack::BCSRMatrix> bcsr_matrix =
qnnpack::generateBlockCSRMatrix<uint32_t>(
kernel.data(),
outputChannels(),
inputChannels(),
rowBlockSize(),
colBlockSize(),
kernelZeroPoints.data());
std::fill(output.begin(), output.end(), 0xA5);
std::fill(output_dynamic.begin(), output_dynamic.end(), 0.0f);
std::fill(accumulators.begin(), accumulators.end(), 0);
for (size_t i = 0; i < batchSize(); i++) {
for (size_t oc = 0; oc < outputChannels(); oc++) {
accumulators[i * outputChannels() + oc] = bias[oc];
}
}
for (size_t i = 0; i < batchSize(); i++) {
for (size_t oc = 0; oc < outputChannels(); oc++) {
for (size_t ic = 0; ic < inputChannels(); ic++) {
accumulators[i * outputChannels() + oc] +=
(int32_t(inputPtr[i * inputStride() + ic]) -
int32_t(inputZeroPoint)) *
(int32_t(kernel[oc * inputChannels() + ic]) -
int32_t(kernelZeroPoints[oc]));
}
}
}
// Create dummy min/max for empty inputs.
// These are only used to compute scale and zero point,
// and real callers will just pull those values from the model.
const int32_t accumulatorsMin = accumulators.empty()
? 0
: *std::min_element(accumulators.cbegin(), accumulators.cend());
const int32_t accumulatorsMax = accumulators.empty()
? 900
: *std::max_element(accumulators.cbegin(), accumulators.cend());
const double outputScale =
double(uint32_t(accumulatorsMax - accumulatorsMin)) / 255.0;
const uint8_t outputZeroPoint = uint8_t(std::max(
std::min(
lrint(
127.5 -
0.5 * double(accumulatorsMin + accumulatorsMax) /
outputScale),
long(std::numeric_limits<uint8_t>::max())),
long(std::numeric_limits<uint8_t>::min())));
ASSERT_EQ(pytorch_qnnp_status_success, pytorch_qnnp_initialize());
// 1 bcz input_scale and kernel_scale are both 1.
std::vector<float>
requantization_scales(num_zero_points_padded, 1.0 * 1.0 / outputScale);
auto scale_generator = [&]() -> float {return (f32rng()/outputScale);};
std::generate(
requantization_scales.begin(),
requantization_scales.end(),
std::ref(scale_generator));
switch(mode) {
case Mode::Runtime:
break;
case Mode::Dynamic: {
// Attention! Bias size must be a multiple of 8.
constexpr size_t kBiasSizeMultiple = 8u;
std::vector<float, AlignedAllocator<float, 32>> bias_float(
(bias.size() + (kBiasSizeMultiple - 1)) & -kBiasSizeMultiple);
std::copy(bias.cbegin(), bias.cend(), bias_float.begin());
pytorch_qnnp_operator_t sparse_gemm = nullptr;
ASSERT_EQ(
pytorch_qnnp_status_success,
pytorch_qnnp_create_fully_connected_sparse_dq_nc_q8(
inputChannels(),
outputChannels(),
inputZeroPoint,
kernelZeroPoints.data(),
bcsr_matrix->col_indices_data_ptr(),
bcsr_matrix->row_values_data_ptr(),
bcsr_matrix->values.data(),
bcsr_matrix->row_block_size,
bcsr_matrix->col_block_size,
pytorch_qnnp_sparse_matrix_indices_dtype_uint32_t,
outputZeroPoint,
qmin(),
qmax(),
0,
requantization_scales.data(),
false,
&sparse_gemm));
ASSERT_EQ(
pytorch_qnnp_status_success,
pytorch_qnnp_setup_fully_connected_sparse_dq_nc_q8(
sparse_gemm,
batchSize(),
inputPtr,
inputStride(),
bias_float.data(),
output_dynamic.data(),
outputStride()));
ASSERT_EQ(
pytorch_qnnp_status_success,
pytorch_qnnp_run_operator(sparse_gemm, nullptr /* thread pool */));
ASSERT_EQ(
pytorch_qnnp_status_success,
pytorch_qnnp_delete_operator(sparse_gemm));
sparse_gemm = nullptr;
break;
}
default:
// Undefined!
ASSERT_TRUE(false);
}
switch (mode) {
case Mode::Runtime:
break;
case Mode::Dynamic:
{
// Bias is added post scaling, as float.
for (size_t i = 0; i < batchSize(); i++) {
for (size_t oc = 0; oc < outputChannels(); oc++) {
accumulators[i * outputChannels() + oc] -= bias[oc];
accumulators_float[i * outputChannels() + oc] =
(float)accumulators[i * outputChannels() + oc] *
requantization_scales[oc] + float(bias[oc]);
}
}
for (size_t i = 0; i < batchSize(); i++) {
for (size_t c = 0; c < outputChannels(); c++) {
ASSERT_EQ(
output_dynamic[i * outputChannels() + c],
accumulators_float[i * outputChannels() + c])
<< "at " << i << ", " << c
<< ": reference = " <<
accumulators_float[i * outputChannels() + c]
<< ", optimized = " << output_dynamic[i * outputChannels() + c];
}
}
}
break;
default:
// Undefined!
ASSERT_TRUE(false);
}
}
}
void testQ8_prepacked(const Mode mode) const {
std::random_device randomDevice;
auto rng = std::mt19937(randomDevice());
auto s32rng =
std::bind(std::uniform_int_distribution<int32_t>(-10000, 10000), rng);
auto u8rng = std::bind(std::uniform_int_distribution<uint8_t>(), rng);
auto f32rng =
std::bind(std::uniform_real_distribution<float>(1, 5), rng);
std::vector<uint8_t> input(
(batchSize() - 1) * inputStride() + inputChannels() + 8);
std::vector<uint8_t> kernel(outputChannels() * inputChannels());
std::vector<int32_t> bias(outputChannels());
std::vector<uint8_t> output(
(batchSize() - 1) * outputStride() + outputChannels());
std::vector<float> output_dynamic(output.size());
std::vector<int32_t> accumulators(batchSize() * outputChannels());
std::vector<float> accumulators_float(batchSize() * outputChannels());
const uint8_t* const inputPtr = input.data();
const uint8_t inputZeroPoint = 127;
// Make number of output channels multiple of 8.
// This is the least common denominator for SSE/ARM kernels we have.
size_t num_zero_points_padded = outputChannels() + 8;
std::vector<uint8_t> kernelZeroPoints(num_zero_points_padded, 127);
for (size_t iteration = 0; iteration < iterations(); iteration++) {
std::generate(input.begin(), input.end(), std::ref(u8rng));
std::generate(bias.begin(), bias.end(), std::ref(s32rng));
std::generate(kernelZeroPoints.begin(), kernelZeroPoints.end(), std::ref(u8rng));
uint8_t max_elem, min_elem;
do {
std::generate(kernel.begin(), kernel.end(), std::ref(u8rng));
fillBlockSparseWeights(
kernel.data(),
outputChannels(),
inputChannels(),
rowBlockSize(),
colBlockSize(),
sparsity(),
kernelZeroPoints.data());
max_elem = *std::max_element(kernel.cbegin(), kernel.cend());
min_elem = *std::min_element(kernel.cbegin(), kernel.cend());
} while (max_elem == min_elem);
std::unique_ptr<qnnpack::BCSRMatrix> bcsr_matrix =
qnnpack::generateBlockCSRMatrix<uint32_t>(
kernel.data(),
outputChannels(),
inputChannels(),
rowBlockSize(),
colBlockSize(),
kernelZeroPoints.data());
std::fill(output.begin(), output.end(), 0xA5);
std::fill(output_dynamic.begin(), output_dynamic.end(), 0.0f);
std::fill(accumulators.begin(), accumulators.end(), 0);
for (size_t i = 0; i < batchSize(); i++) {
for (size_t oc = 0; oc < outputChannels(); oc++) {
accumulators[i * outputChannels() + oc] = bias[oc];
}
}
for (size_t i = 0; i < batchSize(); i++) {
for (size_t oc = 0; oc < outputChannels(); oc++) {
for (size_t ic = 0; ic < inputChannels(); ic++) {
accumulators[i * outputChannels() + oc] +=
(int32_t(inputPtr[i * inputStride() + ic]) -
int32_t(inputZeroPoint)) *
(int32_t(kernel[oc * inputChannels() + ic]) -
int32_t(kernelZeroPoints[oc]));
}
}
}
// Create dummy min/max for empty inputs.
// These are only used to compute scale and zero point,
// and real callers will just pull those values from the model.
const int32_t accumulatorsMin = accumulators.empty()
? 0
: *std::min_element(accumulators.cbegin(), accumulators.cend());
const int32_t accumulatorsMax = accumulators.empty()
? 900
: *std::max_element(accumulators.cbegin(), accumulators.cend());
const double outputScale =
double(uint32_t(accumulatorsMax - accumulatorsMin)) / 255.0;
const uint8_t outputZeroPoint = uint8_t(std::max(
std::min(
lrint(
127.5 -
0.5 * double(accumulatorsMin + accumulatorsMax) /
outputScale),
long(std::numeric_limits<uint8_t>::max())),
long(std::numeric_limits<uint8_t>::min())));
ASSERT_EQ(pytorch_qnnp_status_success, pytorch_qnnp_initialize());
// 1 bcz input_scale and kernel_scale are both 1.
std::vector<float>
requantization_scales(num_zero_points_padded, 1.0 * 1.0 / outputScale);
auto scale_generator = [&]() -> float {return (f32rng()/outputScale);};
std::generate(
requantization_scales.begin(),
requantization_scales.end(),
std::ref(scale_generator));
switch(mode) {
case Mode::Runtime:
break;
case Mode::Dynamic: {
// Attention! Bias size must be a multiple of 8.
constexpr size_t kBiasSizeMultiple = 8u;
std::vector<float, AlignedAllocator<float, 32>> bias_float(
(bias.size() + (kBiasSizeMultiple - 1)) & -kBiasSizeMultiple);
std::copy(bias.cbegin(), bias.cend(), bias_float.begin());
pytorch_qnnp_operator_t sparse_gemm = nullptr;
ASSERT_EQ(
pytorch_qnnp_status_success,
pytorch_qnnp_create_fully_connected_sparse_dq_nc_q8(
inputChannels(),
outputChannels(),
inputZeroPoint,
kernelZeroPoints.data(),
bcsr_matrix->col_indices_data_ptr(),
bcsr_matrix->row_values_data_ptr(),
bcsr_matrix->values.data(),
bcsr_matrix->row_block_size,
bcsr_matrix->col_block_size,
pytorch_qnnp_sparse_matrix_indices_dtype_uint32_t,
outputZeroPoint,
qmin(),
qmax(),
0,
requantization_scales.data(),
true,
&sparse_gemm));
ASSERT_EQ(
pytorch_qnnp_status_success,
pytorch_qnnp_setup_fully_connected_sparse_dq_nc_q8(
sparse_gemm,
batchSize(),
inputPtr,
inputStride(),
bias_float.data(),
output_dynamic.data(),
outputStride()));
ASSERT_EQ(
pytorch_qnnp_status_success,
pytorch_qnnp_run_operator(sparse_gemm, nullptr /* thread pool */));
ASSERT_EQ(
pytorch_qnnp_status_success,
pytorch_qnnp_delete_operator(sparse_gemm));
sparse_gemm = nullptr;
break;
}
default:
// Undefined!
ASSERT_TRUE(false);
}
switch (mode) {
case Mode::Runtime:
break;
case Mode::Dynamic:
{
// Bias is added post scaling, as float.
for (size_t i = 0; i < batchSize(); i++) {
for (size_t oc = 0; oc < outputChannels(); oc++) {
accumulators[i * outputChannels() + oc] -= bias[oc];
accumulators_float[i * outputChannels() + oc] =
(float)accumulators[i * outputChannels() + oc] *
requantization_scales[oc] + float(bias[oc]);
}
}
for (size_t i = 0; i < batchSize(); i++) {
for (size_t c = 0; c < outputChannels(); c++) {
ASSERT_FLOAT_EQ(
output_dynamic[i * outputChannels() + c],
accumulators_float[i * outputChannels() + c])
<< "at " << i << ", " << c
<< ": reference = " <<
accumulators_float[i * outputChannels() + c]
<< ", optimized = " << output_dynamic[i * outputChannels() + c];
}
}
}
break;
default:
// Undefined!
ASSERT_TRUE(false);
}
}
}
private:
size_t inputChannels_{1};
size_t inputStride_{0};
size_t outputChannels_{1};
size_t outputStride_{0};
size_t batchSize_{1};
uint8_t qmin_{0};
uint8_t qmax_{255};
size_t iterations_{1};
float sparsity_{0.7f};
size_t rowBlockSize_{1};
size_t colBlockSize_{4};
};